<86>Apr 21 03:45:08 userdel[62674]: delete user 'rooter'
<86>Apr 21 03:45:08 groupadd[62785]: group added to /etc/group: name=rooter, GID=585
<86>Apr 21 03:45:08 groupadd[62785]: group added to /etc/gshadow: name=rooter
<86>Apr 21 03:45:08 groupadd[62785]: new group: name=rooter, GID=585
<86>Apr 21 03:45:08 useradd[62833]: new user: name=rooter, UID=585, GID=585, home=/root, shell=/bin/bash
<86>Apr 21 03:45:08 userdel[62948]: delete user 'builder'
<86>Apr 21 03:45:08 userdel[62948]: removed group 'builder' owned by 'builder'
<86>Apr 21 03:45:08 userdel[62948]: removed shadow group 'builder' owned by 'builder'
<86>Apr 21 03:45:08 groupadd[63090]: group added to /etc/group: name=builder, GID=586
<86>Apr 21 03:45:08 groupadd[63090]: group added to /etc/gshadow: name=builder
<86>Apr 21 03:45:08 groupadd[63090]: new group: name=builder, GID=586
<86>Apr 21 03:45:08 useradd[63146]: new user: name=builder, UID=586, GID=586, home=/usr/src, shell=/bin/bash
<13>Apr 21 03:45:11 rpmi: libopenblas-0.2.14-alt1.git20150324 1433158855 installed
<13>Apr 21 03:45:11 rpmi: libtcl-8.5.9-alt2 1351878901 installed
<13>Apr 21 03:45:11 rpmi: libexpat-2.2.4-alt0.M80P.1 1503871120 installed
<13>Apr 21 03:45:11 rpmi: libyaml2-0.1.6-alt1 1397147705 installed
<13>Apr 21 03:45:11 rpmi: libgdbm-1.8.3-alt10 1454943313 installed
<13>Apr 21 03:45:11 rpmi: tcl-8.5.9-alt2 1351878901 installed
<13>Apr 21 03:45:11 rpmi: libnumpy-py3-1:1.12.1-alt0.M80P.1 1496160663 installed
<13>Apr 21 03:45:11 rpmi: libnumpy-1:1.12.1-alt0.M80P.1 1496160663 installed
<13>Apr 21 03:45:12 rpmi: libxblas-1.0.248-alt1 1322010716 installed
<13>Apr 21 03:45:12 rpmi: libquadmath0-5.3.1-alt3.M80P.1 p8+225520.100.3.1 1553688800 installed
<13>Apr 21 03:45:12 rpmi: libgfortran3-5.3.1-alt3.M80P.1 p8+225520.100.3.1 1553688800 installed
<13>Apr 21 03:45:12 rpmi: liblapack-1:3.5.0-alt1 1401382194 installed
<13>Apr 21 03:45:12 rpmi: libpng15-1.5.28-alt1 1484572014 installed
<13>Apr 21 03:45:12 rpmi: libgraphite2-1.3.10-alt0.M80P.1 1496411360 installed
<13>Apr 21 03:45:12 rpmi: libX11-locales-3:1.6.3-alt1 1431956885 installed
<13>Apr 21 03:45:12 rpmi: libXdmcp-1.1.1-alt1 1334617699 installed
<13>Apr 21 03:45:12 rpmi: libXau-1.0.8-alt1 1369565807 installed
<13>Apr 21 03:45:12 rpmi: libxcb-1.12-alt2 p8.218219.300 1545313310 installed
<13>Apr 21 03:45:12 rpmi: libX11-3:1.6.3-alt1 1431956911 installed
<13>Apr 21 03:45:12 rpmi: libXrender-0.9.8-alt1 1371312110 installed
<13>Apr 21 03:45:12 rpmi: libtinfo-devel-5.9-alt8 1456756459 installed
<13>Apr 21 03:45:12 rpmi: libncurses-devel-5.9-alt8 1456756459 installed
<13>Apr 21 03:45:12 rpmi: python-modules-curses-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:12 rpmi: libverto-0.2.6-alt1_6 1455633234 installed
<13>Apr 21 03:45:12 rpmi: libkeyutils-1.5.10-alt0.M80P.2 p8+216694.100.6.1 1547827915 installed
<13>Apr 21 03:45:12 rpmi: libcom_err-1.42.13-alt2 1449075846 installed
<13>Apr 21 03:45:12 rpmi: ca-certificates-2016.02.25-alt1 1462368370 installed
<13>Apr 21 03:45:12 rpmi: libcrypto10-1.0.2n-alt0.M80P.1 1512766129 installed
<13>Apr 21 03:45:12 rpmi: libssl10-1.0.2n-alt0.M80P.1 1512766129 installed
<13>Apr 21 03:45:12 rpmi: libharfbuzz-1.6.3-alt0.M80P.1 1509918814 installed
<13>Apr 21 03:45:12 rpmi: libfreetype-2.8-alt0.M80P.3 1505462817 installed
<13>Apr 21 03:45:12 rpmi: fontconfig-2.12.6-alt1.M80P.1 1506008910 installed
Updating fonts cache: <29>Apr 21 03:45:13 fontconfig: Updating fonts cache: succeeded
[ DONE ]
<13>Apr 21 03:45:13 rpmi: libXft-2.3.2-alt1 1409902650 installed
<13>Apr 21 03:45:13 rpmi: libtk-8.5.9-alt3 1308047279 installed
<13>Apr 21 03:45:13 rpmi: tk-8.5.9-alt3 1308047279 installed
<86>Apr 21 03:45:13 groupadd[71620]: group added to /etc/group: name=_keytab, GID=499
<86>Apr 21 03:45:13 groupadd[71620]: group added to /etc/gshadow: name=_keytab
<86>Apr 21 03:45:13 groupadd[71620]: new group: name=_keytab, GID=499
<13>Apr 21 03:45:13 rpmi: libkrb5-1.14.6-alt1.M80P.1 1525355673 installed
<13>Apr 21 03:45:14 rpmi: python3-base-3.5.4-alt2.M80P.1 1527753911 installed
<13>Apr 21 03:45:14 rpmi: python-modules-compiler-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:14 rpmi: python3-module-py-1.4.34-alt0.M80P.1 1503506764 installed
<13>Apr 21 03:45:14 rpmi: python3-3.5.4-alt2.M80P.1 1527753911 installed
<13>Apr 21 03:45:14 rpmi: python-modules-email-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:14 rpmi: rpm-build-python3-0.1.10.10-alt1.M80P.1 1530521451 installed
<13>Apr 21 03:45:14 rpmi: python3-module-yaml-3.11-alt1.hg20141128.1 1459664840 installed
<13>Apr 21 03:45:14 rpmi: python-modules-unittest-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:14 rpmi: python3-module-pytest-3.2.1-alt0.M80P.1 1503499784 installed
<13>Apr 21 03:45:14 rpmi: python3-module-setuptools-1:18.5-alt0.M80P.1 1497527461 installed
<13>Apr 21 03:45:15 rpmi: python3-module-numpy-1:1.12.1-alt0.M80P.1 1496160663 installed
<13>Apr 21 03:45:15 rpmi: python3-module-numpy-testing-1:1.12.1-alt0.M80P.1 1496160663 installed
<13>Apr 21 03:45:15 rpmi: python-modules-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:15 rpmi: python-modules-ctypes-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:15 rpmi: python-modules-encodings-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:15 rpmi: python-modules-multiprocessing-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:15 rpmi: python-modules-logging-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:15 rpmi: python-tools-2to3-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:15 rpmi: python-modules-xml-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:15 rpmi: python-modules-hotshot-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:15 rpmi: python-modules-bsddb-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:15 rpmi: python-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:15 rpmi: python-dev-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:15 rpmi: python-module-py-1.4.34-alt0.M80P.1 1503506764 installed
<13>Apr 21 03:45:15 rpmi: python-modules-json-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:15 rpmi: python-modules-tkinter-2.7.11-alt6.M80P.1 1527682470 installed
<13>Apr 21 03:45:15 rpmi: python-module-yaml-3.11-alt1.hg20141128.1 1459664840 installed
<13>Apr 21 03:45:15 rpmi: python-module-pytest-3.2.1-alt0.M80P.1 1503499784 installed
<13>Apr 21 03:45:16 rpmi: python-module-numpy-1:1.12.1-alt0.M80P.1 1496160663 installed
<13>Apr 21 03:45:16 rpmi: python-module-numpy-testing-1:1.12.1-alt0.M80P.1 1496160663 installed
<13>Apr 21 03:45:16 rpmi: python-module-setuptools-1:18.5-alt0.M80P.1 1497527461 installed
Installing python-module-nltk-3.0.1-alt1.1.1.src.rpm
Building target platforms: x86_64
Building for target x86_64
Executing(%prep): /bin/sh -e /usr/src/tmp/rpm-tmp.91571
+ umask 022
+ /bin/mkdir -p /usr/src/RPM/BUILD
+ cd /usr/src/RPM/BUILD
+ cd /usr/src/RPM/BUILD
+ rm -rf python-module-nltk-3.0.1
+ echo 'Source #0 (python-module-nltk-3.0.1.tar):'
Source #0 (python-module-nltk-3.0.1.tar):
+ /bin/tar -xf /usr/src/RPM/SOURCES/python-module-nltk-3.0.1.tar
+ cd python-module-nltk-3.0.1
+ /bin/chmod -c -Rf u+rwX,go-w .
+ rm -rvf nltk/yaml/
+ tar xf /usr/src/RPM/SOURCES/nltk_contrib-3.0.1.tar
+ cp -fR . ../python3
+ sed -i 's|u'\''||' ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/Tables.py
++ find ../python3 -type f -name '*.py'
++ grep -v 'Tables\.py'
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/setup.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/setup.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/setup.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/setup-eggs.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/setup-eggs.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/setup-eggs.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/wals.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/wals.py
--- ../python3/nltk_contrib/nltk_contrib/wals.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/wals.py (refactored)
@@ -79,13 +79,13 @@
def open_csv(filename, remove_header=True):
filename = os.path.join(data_dir, filename + '.' + file_ext)
wals_file = csv.reader(open(filename, 'r'), dialect=self.dialect)
- if remove_header: wals_file.next()
+ if remove_header: next(wals_file)
for row in wals_file:
- yield [unicode(cell, encoding) for cell in row]
+ yield [str(cell, encoding) for cell in row]
def map_fields(vectors, fields):
for vector in vectors:
- yield dict(zip(fields, vector))
+ yield dict(list(zip(fields, vector)))
# Features
self.features = dict((f['id'], f) for f in
@@ -100,14 +100,14 @@
map_fields(open_csv('languages'),
language_fields))
# convert longitude and latitude to float from string
- for l in self.languages.values():
+ for l in list(self.languages.values()):
l['latitude'] = float(l['latitude'])
l['longitude'] = float(l['longitude'])
# The datapoints file is more complicated. There is a column for
# every feature, and a row for every language. Each cell is either
# empty or contains a value dependent on the feature.
rows = open_csv('datapoints', remove_header=False)
- header = rows.next()
+ header = next(rows)
self.data = defaultdict(dict)
self.feat_lg_map = defaultdict(list)
for row in rows:
@@ -124,7 +124,7 @@
self.iso_index = defaultdict(list)
self.language_name_index = defaultdict(list)
- for lg in self.languages.values():
+ for lg in list(self.languages.values()):
for iso in lg['iso_codes'].split():
self.iso_index[iso] += [lg]
name = lg['name'].lower()
@@ -141,7 +141,7 @@
# family -> genus
# family -> subfamily -> genus
lg_hier = {}
- for lg in self.languages.values():
+ for lg in list(self.languages.values()):
family = lg_hier.setdefault(lg['family'],
LHNode(lg['family']))
family.languages[lg['wals_code']] = lg
@@ -165,12 +165,12 @@
@param wals_code: The WALS code for a language.
"""
- print self.languages[wals_code]['name'], '(%s):' % wals_code
+ print(self.languages[wals_code]['name'], '(%s):' % wals_code)
data = self.data[wals_code]
for feat in sorted(data.keys()):
- print ' ', self.features[feat]['name'], '(%s):' % feat,\
+ print(' ', self.features[feat]['name'], '(%s):' % feat,\
self.values[feat][data[feat]]['description'],\
- '(%s)' % self.values[feat][data[feat]]['value_id']
+ '(%s)' % self.values[feat][data[feat]]['value_id'])
def get_wals_codes_from_iso(self, iso_code):
"""
@@ -217,36 +217,36 @@
def demo(wals_directory=None, dialect='excel-tab', encoding='utf-8'):
if not wals_directory:
import sys
- print >>sys.stderr, 'Error: No WALS data directory provided.'
- print >>sys.stderr, ' You may obtain the database from ' +\
- 'http://wals.info/export'
+ print('Error: No WALS data directory provided.', file=sys.stderr)
+ print(' You may obtain the database from ' +\
+ 'http://wals.info/export', file=sys.stderr)
return
w = WALS(wals_directory, dialect, encoding)
# Basic statistics
- print 'In database:\n %d\tlanguages\n %d\tfeatures ' %\
- (len(w.languages), len(w.features))
+ print('In database:\n %d\tlanguages\n %d\tfeatures ' %\
+ (len(w.languages), len(w.features)))
# values are a nested dictionary (w.values[feature_id][value_id])
- num_vals = sum(map(len, w.values.values()))
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/wals.py
- print ' %d\ttotal values (%f avg. number per feature)' %\
- (num_vals, float(num_vals)/len(w.features))
+ num_vals = sum(map(len, list(w.values.values())))
+ print(' %d\ttotal values (%f avg. number per feature)' %\
+ (num_vals, float(num_vals)/len(w.features)))
# More statistics
- print " %d languages specify feature 81A (order of S, O, and V)" %\
- (len(w.get_languages_with_feature('81A')))
- print " %d langauges have VOS order" %\
- (len(w.get_languages_with_feature('81A', value='4')))
+ print(" %d languages specify feature 81A (order of S, O, and V)" %\
+ (len(w.get_languages_with_feature('81A'))))
+ print(" %d langauges have VOS order" %\
+ (len(w.get_languages_with_feature('81A', value='4'))))
# Getting language data
- print "\nGetting data for languages named 'Irish'"
+ print("\nGetting data for languages named 'Irish'")
for wals_code in w.get_wals_codes_from_name('Irish'):
l = w.languages[wals_code]
- print ' %s (ISO-639 code: %s WALS code: %s)' %\
- (l['name'], l['iso_codes'], wals_code)
- print "\nGetting data for languages with ISO 'isl'"
+ print(' %s (ISO-639 code: %s WALS code: %s)' %\
+ (l['name'], l['iso_codes'], wals_code))
+ print("\nGetting data for languages with ISO 'isl'")
for wals_code in w.get_wals_codes_from_iso('isl'):
w.show_language(wals_code)
- print "\nLocations of dialects for the Min Nan language (ISO 'nan'):"
+ print("\nLocations of dialects for the Min Nan language (ISO 'nan'):")
for wals_code in w.get_wals_codes_from_iso('nan'):
l = w.languages[wals_code]
- print " %s\tlat:%f\tlong:%f" %\
- (l['name'], l['latitude'], l['longitude'])
+ print(" %s\tlat:%f\tlong:%f" %\
+ (l['name'], l['latitude'], l['longitude']))
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/utilities.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/toolbox/utilities.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/utilities.py
--- ../python3/nltk_contrib/nltk_contrib/toolbox/utilities.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/toolbox/utilities.py (refactored)
@@ -169,7 +169,7 @@
return dict
def items(self):
- return zip(self._keys, self.values())
+ return list(zip(self._keys, list(self.values())))
def keys(self):
return self._keys
@@ -191,9 +191,9 @@
def update(self, dict):
UserDict.update(self, dict)
- for key in dict.keys():
+ for key in list(dict.keys()):
if key not in self._keys:
self._keys.append(key)
def values(self):
- return map(self.get, self._keys)
+ return list(map(self.get, self._keys))
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/text.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/toolbox/text.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/text.py
--- ../python3/nltk_contrib/nltk_contrib/toolbox/text.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/toolbox/text.py (refactored)
@@ -10,7 +10,7 @@
"""
import re
-from utilities import Field, SequentialDictionary
+from .utilities import Field, SequentialDictionary
from nltk.corpus.reader.toolbox import StandardFormat
@@ -185,7 +185,7 @@
def get_field_markers(self):
"""Obtain list of unique fields for the line."""
- return self._fields.keys()
+ return list(self._fields.keys())
def get_field_as_string(self,
field_marker,
@@ -224,7 +224,7 @@
def get_field_values(self):
"""Obtain list of field values for the line."""
- return self._fields.values()
+ return list(self._fields.values())
def get_label(self):
"""Obtain identifier for line."""
@@ -246,7 +246,7 @@
"""Obtain a list of morpheme objects for the line."""
morphemes = []
indices = get_indices(self.getFieldValueByFieldMarker("m"))
- print "%s" % indices
+ print("%s" % indices)
morphemeFormField = self.getFieldValueByFieldMarker("m")
morphemeGlossField = self.getFieldValueByFieldMarker("g")
morphemeFormSlices = get_slices_by_indices(morphemeFormField, indices)
@@ -519,7 +519,7 @@
"""This method finds the indices for the leftmost boundaries
of the units in a line of aligned text.
- Given the field \um, this function will find the
+ Given the field \\um, this function will find the
indices identifing leftmost word boundaries, as
follows::
@@ -527,7 +527,7 @@
| | | |
|||||||||||||||||||||||||||
\sf dit is een goede <- surface form
- \um dit is een goed -e <- underlying morphemes
+ \\um dit is een goed -e <- underlying morphemes
\mg this is a good -ADJ <- morpheme gloss
\gc DEM V ART ADJECTIVE -SUFF <- grammatical categories
\ft This is a good explanation. <- free translation
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/settings.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/toolbox/settings.py
--- ../python3/nltk_contrib/nltk_contrib/toolbox/settings.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/toolbox/settings.py (refactored)
@@ -45,7 +45,7 @@
"""Obtain a list of all of the field markers for the marker set.
@returns: list of field markers
@rtype: list of strings"""
- return self._dict.keys()
+ return list(self._dict.keys())
def add_field_metadata(self, fmeta) :
"""Add FieldMetadata object to dictionary of marker sets, keyed by field marker.
@@ -366,54 +366,54 @@
else :
pass
- print "----- Interlinear Process -----"
- print " FROM: [%s]" % ip.get_from_marker()
- print " TO: [%s]" % ip.get_to_marker()
- print " GLOSS SEP: [%s]" % ip.get_gloss_separator()
- print " FAIL MARK: [%s]" % ip.get_failure_marker()
- print " SHOW FAIL MARK: [%s]" % ip.show_failure_marker()
- print " SHOW ROOT GUESS: [%s]" % ip.show_root_guess()
- print " PARSE PROCESS: [%s]" % ip.is_parse_process()
+ print("----- Interlinear Process -----")
+ print(" FROM: [%s]" % ip.get_from_marker())
+ print(" TO: [%s]" % ip.get_to_marker())
+ print(" GLOSS SEP: [%s]" % ip.get_gloss_separator())
+ print(" FAIL MARK: [%s]" % ip.get_failure_marker())
+ print(" SHOW FAIL MARK: [%s]" % ip.show_failure_marker())
+ print(" SHOW ROOT GUESS: [%s]" % ip.show_root_guess())
+ print(" PARSE PROCESS: [%s]" % ip.is_parse_process())
trilook = proc.find("triLook")
if trilook :
- print " -- trilook --"
- print " DB TYPE: [%s]" % self.__parse_value(trilook, "dbtyp")
- print " MKR OUTPUT: [%s]" % self.__parse_value(trilook, "mkrOut")
+ print(" -- trilook --")
+ print(" DB TYPE: [%s]" % self.__parse_value(trilook, "dbtyp"))
+ print(" MKR OUTPUT: [%s]" % self.__parse_value(trilook, "mkrOut"))
tripref = proc.find("triPref")
if tripref :
- print " -- tripref --"
- print " DB TYPE: [%s]" % self.__parse_value(tripref, "dbtyp")
- print " MKR OUTPUT: [%s]" % self.__parse_value(tripref, "mkrOut")
+ print(" -- tripref --")
+ print(" DB TYPE: [%s]" % self.__parse_value(tripref, "dbtyp"))
+ print(" MKR OUTPUT: [%s]" % self.__parse_value(tripref, "mkrOut"))
try :
for d in tripref.findall("drflst/drf") :
- print " DB: [%s]" % self.__parse_value(d, "File")
+ print(" DB: [%s]" % self.__parse_value(d, "File"))
except :
pass
try :
for d in tripref.find("mrflst") :
- print " MKR: [%s]" % d.text
+ print(" MKR: [%s]" % d.text)
except :
pass
triroot = proc.find("triRoot")
if triroot :
- print " -- triroot --"
- print " DB TYPE: [%s]" % self.__parse_value(triroot, "dbtyp")
- print " MKR OUTPUT: [%s]" % self.__parse_value(triroot, "mkrOut")
+ print(" -- triroot --")
+ print(" DB TYPE: [%s]" % self.__parse_value(triroot, "dbtyp"))
+ print(" MKR OUTPUT: [%s]" % self.__parse_value(triroot, "mkrOut"))
try :
for d in triroot.findall("drflst/drf") :
- print " DB: [%s]" % self.__parse_value(d, "File")
+ print(" DB: [%s]" % selRefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/settings.py
f.__parse_value(d, "File"))
except :
pass
try :
for d in triroot.find("mrflst") :
- print " MKR: [%s]" % d.text
+ print(" MKR: [%s]" % d.text)
except :
pass
- print ""
+ print("")
# Handle metadata for field markers (aka, marker set)
for mkr in self._tree.findall('mkrset/mkr') :
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/normalise.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/toolbox/normalise.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/normalise.py
--- ../python3/nltk_contrib/nltk_contrib/toolbox/normalise.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/toolbox/normalise.py (refactored)
@@ -31,7 +31,7 @@
add_default_fields(lexicon, hierarchy.default_fields)
sort_fields(lexicon, hierarchy.field_order)
add_blank_lines(lexicon, hierarchy.blanks_before, hierarchy.blanks_between)
- print to_sfm_string(lexicon, encoding='utf8')
+ print(to_sfm_string(lexicon, encoding='utf8'))
if __name__ == '__main__':
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/lexicon.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/toolbox/lexicon.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/lexicon.py
--- ../python3/nltk_contrib/nltk_contrib/toolbox/lexicon.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/toolbox/lexicon.py (refactored)
@@ -13,7 +13,7 @@
import os, re, sys
import nltk.data
from nltk.corpus.reader.toolbox import StandardFormat
-from utilities import Field, SequentialDictionary
+from .utilities import Field, SequentialDictionary
class Lexicon(StandardFormat):
@@ -67,7 +67,7 @@
@return: all of the entries in the Lexicon
@rtype: list of Entry objects
"""
- keys = self._entries.keys()
+ keys = list(self._entries.keys())
keys.sort()
for k in keys :
v = self._entries[k]
@@ -95,10 +95,10 @@
# Should this throw an error if a field with no values
# is used in the list of key fields?
pass
- if self._entries.has_key(key) :
+ if key in self._entries :
if unique :
msg = "Non-unique entry! \nEntry: \n%s\nKey Fields: %s\nKey: '%s'\n" % (entry, self._key_fields, key)
- raise ValueError, msg
+ raise ValueError(msg)
else :
self._entries[key] = []
# Now append entry to list of entries for key
@@ -195,7 +195,7 @@
"""
s = ""
fields = self.get_fields()
- for fm, fvs in self._fields.items():
+ for fm, fvs in list(self._fields.items()):
for fv in fvs:
s = s + "\n\\%s %s" % (fm, fv)
return s
@@ -264,7 +264,7 @@
@rtype: list of Field objects
"""
- return self._fields.values()
+ return list(self._fields.values())
def get_field_markers(self):
"""
@@ -273,7 +273,7 @@
@return: the field markers of an entry
@rtype: list
"""
- return self._fields.keys()
+ return list(self._fields.keys())
def get_values_by_marker(self, field_marker, sep=None) :
return self.get_field_values_by_field_marker(field_marker, sep)
@@ -364,7 +364,7 @@
@param value : field value
@type value : string
"""
- if self._fields.has_key(marker):
+ if marker in self._fields:
fvs = self._fields[marker]
fvs.append(value)
else:
@@ -381,7 +381,7 @@
@param fieldMarker: field marker to be deleted
@type fieldMarker: string
"""
- if self._fields.has_key(fieldMarker):
+ if fieldMarker in self._fields:
del self._fields[fieldMarker]
def demo() :
@@ -390,9 +390,9 @@
l.parse(key_fields=['lx','ps','sn'], unique_entry=False)
h = l.get_header()
for e in l.get_entries() :
- print "<%s><%s><%s>" % (e.get_field_as_string("lx", ""),
+ print("<%s><%s><%s>" % (e.get_field_as_string("lx", ""),
e.get_field_as_string("ps", ""),
- e.get_field_as_string("sn", ""))
+ e.get_field_as_string("sn", "")))
if __name__ == '__main__':
demo()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/language.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/toolbox/language.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/language.py
--- ../python3/nltk_contrib/nltk_contrib/toolbox/language.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/toolbox/language.py (refactored)
@@ -44,7 +44,7 @@
for c in case_pairs.splitlines():
val = c.split()
if len(val) != 2:
- raise ValueError, '"%s" is not a valid case association' % c
+ raise ValueError('"%s" is not a valid case association' % c)
u, l = val
let_u = case[u] = Letter()
let_l = case[l] = Letter()
@@ -111,20 +111,20 @@
j = 1
for m in p:
if m in graphs:
- raise ValueError, 'primary "%s" already in sort order' % m
+ raise ValueError('primary "%s" already in sort order' % m)
graphs[m] = g = Graph()
g.type = 'p'
g.order = (i, j, unmarked)
j += 1
i += 1
- prims = graphs.keys()
+ prims = list(graphs.keys())
prims.remove(' ')
self.letter_pat = self.make_pattern(prims)
i = 1
for s in sec_pre:
if s in graphs:
- raise ValueError, 'secondary preceding "%s" already in sort order' % s
+ raise ValueError('secondary preceding "%s" already in sort order' % s)
graphs[s] = g = Graph()
g.type = 's'
g.order = i
@@ -134,13 +134,13 @@
i += 1
for s in sec_fol:
if s in graphs:
- raise ValueError, 'secondary following "%s" already in sort order' % s
+ raise ValueError('secondary following "%s" already in sort order' % s)
graphs[s] = g = Graph()
g.type = 's'
g.order = i
i += 1
- self.graph_pat = self.make_pattern(graphs.keys())
+ self.graph_pat = self.make_pattern(list(graphs.keys()))
##~ graph_list = graphs.keys()
##~
##~ # sort the longest first
@@ -167,7 +167,7 @@
if match is not None:
return match.group()
else:
- raise ValueError, 'no primary found in "%s"' % s
+ raise ValueError('no primary found in "%s"' % s)
def transform(self, s):
graphs = self.graphs
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/iu_mien_hier.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/toolbox/iu_mien_hier.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/iu_mien_hier.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/etreelib.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/toolbox/etreelib.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/etreelib.py
--- ../python3/nltk_contrib/nltk_contrib/toolbox/etreelib.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/toolbox/etreelib.py (refactored)
@@ -20,7 +20,7 @@
for item in children:
if isinstance(item, dict):
elem.attrib.update(item)
- elif isinstance(item, basestring):
+ elif isinstance(item, str):
if len(elem):
elem[-1].tail = (elem[-1].tail or "") + item
else:
@@ -47,7 +47,7 @@
@type item: string or ElementTree.Element
@param item: string or element appended to elem
"""
- if isinstance(item, basestring):
+ if isinstance(item, str):
if len(elem):
elem[-1].tail = (elem[-1].tail or "") + item
else:
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/errors.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/toolbox/errors.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/errors.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo4.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo4.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo4.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo3.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo3.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo3.py
--- ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo3.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo3.py (refactored)
@@ -42,9 +42,9 @@
num_senses += 1
for example in sense.findall('example'):
num_examples += 1
-print 'num. lexemes =', num_lexemes
-print 'num. senses =', num_senses
-print 'num. examples =', num_examples
+print('num. lexemes =', num_lexemes)
+print('num. senses =', num_senses)
+print('num. examples =', num_examples)
#another approach
-print 'num. examples =', len(lexicon.findall('.//example'))
+print('num. examples =', len(lexicon.findall('.//example')))
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo2.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo2.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo2.py
--- ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo2.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo2.py (refactored)
@@ -38,6 +38,6 @@
return (s)
for field in lexicon[50].getchildren():
- print "\\%s %s" % (field.tag, field.text)
+ print("\\%s %s" % (field.tag, field.text))
if field.tag == "lx":
- print "\\cv %s" % cv(field.text)
+ print("\\cv %s" % cv(field.text))
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo1.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo1.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo1.py
--- ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo1.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/toolbox/demos/demo1.py (refactored)
@@ -21,7 +21,7 @@
for entry in lexicon.findall('record'):
num_entries += 1
sum_size += len(entry)
-print sum_size/num_entries
+print(sum_size/num_entries)
from nltk.etree.ElementTree import ElementTree
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/demos/analyse_toolbox.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/toolbox/demos/analyse_toolbox.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/demos/analyse_toolbox.py
--- ../python3/nltk_contrib/nltk_contrib/toolbox/demos/analyse_toolbox.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/toolbox/demos/analyse_toolbox.py (refactored)
@@ -66,7 +66,7 @@
def pattern_count(patt_dict):
n = 0
- for value in patt_dict.values():
+ for value in list(patt_dict.values()):
n += len(value)
return n
@@ -96,25 +96,25 @@
out_file.write(ET.tostring(lexicon, encoding='UTF-8'))
out_file.close()
- print 'analysing files\n%s\n' % '\n'.join(dict_names)
+ print('analysing files\n%s\n' % '\n'.join(dict_names))
if xml:
- print 'XML lexicon output in file "%s"\n' % xml
- print '====chunk grammar===='
- print gram
- print '\n'
+ print('XML lexicon output in file "%s"\n' % xml)
+ print('====chunk grammar====')
+ print(gram)
+ print('\n')
max_positions = 30
- for structure, patt_dict in analysis.items():
- print '\n\n===%s===: total= %d' %(structure, pattern_count(patt_dict))
- for pattern, positions in sorted(patt_dict.items(), key=lambda t: (-len(t[1]), t[0])):
+ for structure, patt_dict in list(analysis.items()):
+ print('\n\n===%s===: total= %d' %(structure, pattern_count(patt_dict)))
+ for pattern, positions in sorted(list(patt_dict.items()), key=lambda t: (-len(t[1]), t[0])):
if len(positions) <= max_positions:
pos_str = 'Entries: %s' % ', '.join(positions)
else:
pos_str = 'Too many entries to list.'
- print "\t%5d: %s %s" % (len(positions), ':'.join(pattern), pos_str)
- print "\n\n"
- print 'mkr\tcount\tnonblank'
+ print("\t%5d: %s %s" % (len(positions), ':'.join(pattern), pos_str))
+ print("\n\n")
+ print('mkr\tcount\tnonblank')
for mkr in mkr_counts:
- print '%s\t%5d\t%5d' % (mkr, mkr_counts.get(mkr, 0), nonblank_mkr_counts.get(mkr, 0))
+ print('%s\t%5d\t%5d' % (mkr, mkr_counts.get(mkr, 0), nonblank_mkr_counts.get(mkr, 0)))
if __name__ == "__main__":
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/data.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/toolbox/data.py
--- ../python3/nltk_contrib/nltk_contrib/toolbox/data.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/toolbox/data.py (refactored)
@@ -26,11 +26,11 @@
first = dict()
gram = dict()
- for sym, value in grammar.items():
+ for sym, value in list(grammar.items()):
first[sym] = value[0]
gram[sym] = value[0] + value[1]
parse_table = dict()
- for state in gram.keys():
+ for state in list(gram.keys()):
parse_table[state] = dict()
for to_sym in gram[state]:
if to_sym in grammar:
@@ -96,7 +96,7 @@
field_iter = self.fields(**kwargs)
loop = True
try:
- mkr, value = field_iter.next()
+ mkr, value = next(field_iter)
except StopIteration:
loop = False
while loop:
@@ -117,8 +117,7 @@
builder.end(state)
pstack.pop()
else:
- raise ValueError, \
- 'Line %d: syntax error, unexpected marker %s.' % (self.line_num, mkr)
+ raise ValueError('Line %d: syntax error, unexpected marker %s.' % (self.line_num, mkr))
else:
# start of terminal marker
add = True
@@ -128,7 +127,7 @@
builder.data(value)
builder.end(mkr)
try:
- mkr, value = field_iter.next()
+ mkr, value = next(field_iter)
except StopIteration:
loop = False
else:
@@ -141,8 +140,7 @@
builder.end(state)
pstack.pop()
else:
- raise ValueError, \
- 'Line %d: syntax error, unexpected marker %s.' % (self.line_num, mkr)
+ raise ValueError('Line %d: syntax error, unexpected marker %s.' % (self.line_num, mkr))
for state, first_elems in reversed(pstack):
builder.end(state)
return builder.close()
@@ -161,7 +159,7 @@
@rtype: C{ElementTree._ElementInterface}
@return: Contents of toolbox data parsed according to rules in grammar
return parses of all the dictionary files"""
- if isinstance(file_names, types.StringTypes):
+ if isinstance(file_names, (str,)):
file_names = (file_names, )
db = toolbox.ToolboxData()
all_data = data_header = None
@@ -170,7 +168,7 @@
logging.info('about to parse %s' % fname)
try:
cur_data = db.parse(grammar, **kwargs)
- except ValueError, msg:
+ except ValueError as msg:
logging.error('%s: %s' % (fname, msg))
db.close()
continue
@@ -178,7 +176,7 @@
if all_data is not None:
header = cur_data.find('header')
if header != data_header:
- raise ValueError, "cannot combine databases with different types"
+ raise ValueError("cannot combine databases with different types")
for elem in cur_data.findall('record'):
all_data.append(elem)
else:
@@ -283,12 +281,12 @@
_months = _init_months()
fields = s.split('/')
if len(fields) != 3:
- raise ValueError, 'Invalid Toolbox date "%s"' % s
+ raise ValueError('Invalid Toolbox date "%s"' % s)
day = int(fields[0])
try:
month = _months[fields[1]]
except KeyError:
- raise ValueError, 'Invalid Toolbox date "%s"' % s
+ raise ValueError('Invalid Toolbox date "%s"' % s)
year = int(fields[2])
return date(year, month, day)
@@ -334,7 +332,7 @@
'hm': {'class': 'hm'}, # homonym
}
-char_codes = '|'.join(sorted(char_code_attribs.keys(), key=lambda s: (-len(s), s)))
+char_codes = '|'.join(sorted(list(char_code_attribs.keys()), key=lambda s: (-len(s), s)))
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/data.py
word_pat = re.compile(r'(%s):([^\s:;,.?!(){}\[\]]+)' % char_codes)
bar_pat = re.compile(r'\|(%s)\{([^}]*)(?:\}|$)' % char_codes)
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/toolbox/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/toolbox/__init__.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/toolbox/__init__.py
--- ../python3/nltk_contrib/nltk_contrib/toolbox/__init__.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/toolbox/__init__.py (refactored)
@@ -1,3 +1,3 @@
# __all__ = ["data", "etreelib", "errors", "lexicon", "settings", "text", "utilities"]
-from data import *
+from .data import *
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/timex.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/timex.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/timex.py
--- ../python3/nltk_contrib/nltk_contrib/timex.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/timex.py (refactored)
@@ -11,9 +11,9 @@
try:
from mx.DateTime import *
except ImportError:
- print """
+ print("""
Requires eGenix.com mx Base Distribution
-http://www.egenix.com/products/python/mxBase/"""
+http://www.egenix.com/products/python/mxBase/""")
# Predefined strings.
numbers = "(^a(?=\s)|one|two|three|four|five|six|seven|eight|nine|ten| \
@@ -173,8 +173,7 @@
# Find all identified timex and put them into a list
timex_regex = re.compile(r'<TIMEX2>.*?</TIMEX2>', re.DOTALL)
timex_found = timex_regex.findall(tagged_text)
- timex_found = map(lambda timex:re.sub(r'</?TIMEX2.*?>', '', timex), \
timex_found)
+ timex_found = [re.sub(r'</?TIMEX2.*?>', '', timex) for timex in timex_found]
# Calculate the new date accordingly
for timex in timex_found:
@@ -189,9 +188,9 @@
timex, re.IGNORECASE)
value = split_timex[0]
unit = split_timex[1]
- num_list = map(lambda s:hashnum(s),re.findall(numbers + '+', \
- value, re.IGNORECASE))
- timex = `sum(num_list)` + ' ' + unit
+ num_list = [hashnum(s) for s in re.findall(numbers + '+', \
+ value, re.IGNORECASE)]
+ timex = repr(sum(num_list)) + ' ' + unit
# If timex matches ISO format, remove 'time' and reorder 'date'
if re.match(r'\d+[/-]\d+[/-]\d+ \d+:\d+:\d+\.\d+', timex):
@@ -351,7 +350,7 @@
def demo():
import nltk
text = nltk.corpus.abc.raw('rural.txt')[:10000]
- print tag(text)
+ print(tag(text))
if __name__ == '__main__':
demo()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/utils/parallel.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/tiger/utils/parallel.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/utils/parallel.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/utils/factory.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/utils/factory.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/utils/factory.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/utils/factory.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/utils/factory.py (refactored)
@@ -21,12 +21,12 @@
switch_value = self._get_switch(*args)
try:
cls = self._switch[switch_value]
- except KeyError, e:
+ except KeyError as e:
self.raise_error(e.args[0])
return self._create_instance(cls, *args)
def raise_error(self, switch_name):
- raise MissingClassException, switch_name
+ raise MissingClassException(switch_name)
def _create_instance(self, cls, *args):
return cls(*args)
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/utils/etree_xml.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/utils/etree_xml.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/utils/etree_xml.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/utils/etree_xml.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/utils/etree_xml.py (refactored)
@@ -3,6 +3,7 @@
# Author: Torsten Marek
# Licensed under the GNU GPLv2
import logging
+import collections
__all__ = ("element_handler", "IterParseHandler", "ET")
@@ -37,17 +38,15 @@
class IterParseType(type):
def __new__(mcs, classname, bases, class_dict):
class_dict["__x_handlers__"] = handlers = {}
- for attr in class_dict.itervalues():
- if callable(attr) and hasattr(attr, HANDLER_ATTRIBUTE_NAME):
+ for attr in class_dict.values():
+ if isinstance(attr, collections.Callable) and hasattr(attr, HANDLER_ATTRIBUTE_NAME):
handlers[getattr(attr, HANDLER_ATTRIBUTE_NAME)] = attr
return type.__new__(mcs, classname, bases, class_dict)
-class IterParseHandler(object):
+class IterParseHandler(object, metaclass=IterParseType):
DELETE_BRANCH = True
-
- __metaclass__ = IterParseType
__x_handlers__ = {}
@@ -75,7 +74,7 @@
context = iter(event_source)
- event, root = context.next()
+ event, root = next(context)
self._handle_root(root)
for event, elem in context:
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/utils/enum.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/utils/enum.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/utils/enum.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/utils/enum.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/utils/enum.py (refactored)
@@ -25,7 +25,7 @@
dct["__slots__"] = ("__value__", "__name__")
dct["__members__"] = members = {}
- for name, obj in dct.iteritems():
+ for name, obj in dct.items():
if isinstance(obj, _EnumMember):
members[name] = obj.fields
obj._name = name
@@ -44,10 +44,10 @@
field_count = len(type.__getattribute__(mcs, "__fields__"))
- for member_name, field_values in decl.iteritems():
+ for member_name, field_values in decl.items():
members[member_name] = mcs()
if len(field_values) != field_count:
- raise TypeError, (
+ raise TypeError(
"Wrong number of fields for enum member '%s'. Expected %i, got %i instead." % (
member_name, field_count, len(field_values)))
else:
@@ -57,19 +57,19 @@
type.__setattr__(mcs, "__members__", members)
def __setattr__(mcs, name, value):
- raise TypeError, "enum types cannot be modified"
+ raise TypeError("enum types cannot be modified")
def __delattr__(mcs, name):
- raise TypeError, "enum types cannot be modified"
+ raise TypeError("enum types cannot be modified")
def names(mcs):
- return type.__getattribute__(mcs, '__members__').keys()
+ return list(type.__getattribute__(mcs, '__members__').keys())
def __len__(mcs):
return len(type.__getattribute__(mcs, '__members__'))
def __iter__(mcs):
- return type.__getattribute__(mcs, '__members__').itervalues()
+ return iter(type.__getattribute__(mcs, '__members__').values())
def __contains__(mcs, name):
return name in type.__getattribute__(mcs, '__members__')
@@ -78,10 +78,7 @@
return "" % (mcs.__module__, mcs.__name__)
-class Enum(object):
- __metaclass__ = _EnumType
-
- # assigned by metaclass
+class Enum(object, metaclass=_EnumType):
__value__ = ()
__name__ = ""
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/utils/db.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/tiger/utils/db.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/utils/db.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/utils/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No files need to be modified.
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/tigerxml.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/tigerxml.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/tigerxml.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/tigerxml.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/tigerxml.py (refactored)
@@ -119,7 +119,7 @@
"""
for node_elem in node_elems:
node = node_cls(node_elem.get("id"))
- for feature_name, value in node_elem.attrib.iteritems():
+ for feature_name, value in node_elem.attrib.items():
if feature_name == "id":
continue
node.features[feature_name] = value
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/query/tsqlparser.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/query/tsqlparser.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/query/tsqlparser.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/query/tsqlparser.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/query/tsqlparser.py (refactored)
@@ -71,13 +71,13 @@
and can have parentheses for grouping.
"""
ops = [
- (suppressed_literal(u"!"), 1, pyparsing.opAssoc.RIGHT,
+ (suppressed_literal("!"), 1, pyparsing.opAssoc.RIGHT,
lambda s, l, t: ast.Negation(t[0][0])),
- (suppressed_literal(u"&"), 2, pyparsing.opAssoc.LEFT,
+ (suppressed_literal("&"), 2, pyparsing.opAssoc.LEFT,
lambda s, l, t: ast.Conjunction(t.asList()[0])),
- (suppressed_literal(u"|"), 2, pyparsing.opAssoc.LEFT,
+ (suppressed_literal("|"), 2, pyparsing.opAssoc.LEFT,
lambda s, l, t: ast.Disjunction(t.asList()[0]))]
return pyparsing.operatorPrecedence(atom, ops)
@@ -145,7 +145,7 @@
"""
assert all(len(pfx) == 1 for pfx in type_prefixes), "prefix list may only contain characters"
- v_expr = pyparsing.Combine(pyparsing.oneOf(type_prefixes.keys()) +
+ v_expr = pyparsing.Combine(pyparsing.oneOf(list(type_prefixes.keys())) +
pyparsing.Word(pyparsing.alphanums + "_")).setResultsName("varname")
v_expr.type_map = type_prefixes
return v_expr
@@ -175,7 +175,7 @@
:Named Results:
- `expr`: the right-hand side of the definition
"""
- definition = (variable_expr + suppressed_literal(u":") +
+ definition = (variable_expr + suppressed_literal(":") +
right_hand.setResultsName("expr"))
return definition.setParseAction(
lambda s, l, t: ast.VariableDefinition(
@@ -225,7 +225,7 @@
:AST Node: `FeatureConstraint`
:Example: ``cat="NP"``, ``pos!=/N+/``, ``word="safe" & pos="NN"``
"""
- op = pyparsing.oneOf(u"= !=")
+ op = pyparsing.oneOf("= !=")
v = FEATURE_VALUE
constraint = (WORD + op + v)
@@ -245,7 +245,7 @@
:AST Node: `NodeDescription`
:Example: ``[pos="PREP" & word=("vor"|"vorm")]``, ``[T]``, ``[#a:(word = "safe")]``, ``[#b]``
"""
- node_desc = surround(u"[", FEATURE_CONSTRAINT, u"]")
+ node_desc = surround("[", FEATURE_CONSTRAINT, "]")
return node_desc.setParseAction(single_value_holder(ast.NodeDescription))
NODE_DESCRIPTION = node_description()
@@ -423,7 +423,7 @@
arg = (NODE_OPERAND | integer_literal()).setResultsName("args", listAllMatches = True)
identifier = WORD("pred")
- return (identifier + surround(u"(", pyparsing.delimitedList(arg), u")")
+ return (identifier + surround("(", pyparsing.delimitedList(arg), ")")
).setParseAction(lambda s, l, t: ast.Predicate(t.pred, t.args.asList()))
@@ -444,7 +444,7 @@
"""
atom = (node_predicate() | node_relation_constraint() | NODE_OPERAND)
- expr = pyparsing.Group(atom + pyparsing.OneOrMore(suppressed_literal(u"&") + atom)
+ expr = pyparsing.Group(atom + pyparsing.OneOrMore(suppressed_literal("&") + atom)
).setParseAction(lambda s, l, t: ast.Conjunction(t.asList()[0])) | atom
expr.setParseAction(single_value_holder(ast.TsqlExpression))
@@ -463,5 +463,5 @@
"""
try:
return self._g.parseString(query_string)[0]
- except pyparsing.ParseException, e:
- raise TigerSyntaxError, e
+ except pyparsing.ParseException as e:
+ raise TigerSyntaxError(e)
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/query/result.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/query/result.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/query/result.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/query/result.py (refactored)
@@ -13,7 +13,7 @@
import operator
import multiprocessing
from functools import partial
-from itertools import count, izip
+from itertools import count
from collections import defaultdict
from nltk_contrib.tiger.index import IndexNodeId
@@ -43,27 +43,27 @@
def partition_variables(variables, constraints):
- var_connections = dict(izip(variables, count()))
+ var_connections = dict(zip(variables, count()))
for l, r in constraints:
new_id = var_connections[l]
old_id = var_connections[r]
- for name, value in var_connections.iteritems():
+ for name, value in var_connections.items():
if value == old_id:
var_connections[name] = new_id
sets = defaultdict(set)
- for name, value in var_connections.iteritems():
+ for name, value in var_connections.items():
sets[value].add(name)
- return sets.values()
+ return list(sets.values())
class ConstraintChecker(object):
@classmethod
def _nodevar_idx_combinations(cls, ordered_node_vars):
return [(upper_key, lower_key)
- for lower_key in xrange(1, len(ordered_node_vars))
- for upper_key in xrange(lower_key)]
+ for lower_key in range(1, len(ordered_node_vars))
+ for upper_key in range(lower_key)]
@classmethod
def _get_node_variables(cls, constraints):
@@ -160,7 +160,7 @@
of the module.
"""
if self.has_results:
- g = [item for item in self.nodes.items() if not item[0].is_set]
+ g = [item for item in list(self.nodes.items()) if not item[0].is_set]
return [self._nodeids(query_result)
for query_result in named_cross_product(g)
@@ -187,7 +187,7 @@
if query_context.checked_graphs == PREPARE_NEW_AFTER:
query_context.checker_factory = ConstraintChecker.prepare(query_context.constraints, query_context.node_counts)
elif query_context.checked_graphs < PREPARE_NEW_AFTER:
- for node_var, node_ids in graph_results.iteritems():
+ for node_var, node_ids in graph_results.items():
query_context.node_counts[node_var] += len(node_ids)
c = query_context.checker_factory(graph_results, query_context)
@@ -198,7 +198,7 @@
def __init__(self, nodes, query_context):
query_context.checked_graphs += 1
self._nodes = [(node_var.name, [IndexNodeId.from_int(nid) for nid in node_ids])
- for node_var, node_ids in nodes.iteritems()
+ for node_var, node_ids in nodes.items()
if not node_var.is_set]
self._size = product((len(ids) for var, ids in self._nodes), 1)
@@ -230,7 +230,7 @@
self.checker_factory = ConstraintChecker.prepare(constraints)
self.constraint_checker = cct_search
else:
- raise MissingFeatureError, "Missing feature: disjoint constraint sets. Please file a bug report."
+ raise MissingFeatureError("Missing feature: disjoint constraint sets. Please file a bug report.")
self._reset_stats()
def _reset_stats(self):
@@ -264,7 +264,7 @@
class ResultBuilder(QueryContext, ResultBuilderBase):
def __init__(self, ev_context, node_descriptions, predicates, constraints):
- QueryContext.__init__(self, ev_context.db, constraints, node_descriptions.keys())
+ QueryContext.__init__(self, ev_context.db, constraints, list(node_descriptions.keys()))
ResultBuilderBase.__init__(self, node_descriptions, predicates)
self._nodesearcher = ev_context.nodesearcher
@@ -277,9 +277,9 @@
self._reset_stats()
matching_graphs = self._nodesearcher.search_nodes(self._nodes, self._predicates)
- return filter(operator.itemgetter(1),
+ return list(filter(operator.itemgetter(1),
((graph_id, self.constraint_checker(nodes, self))
- for graph_id, nodes in matching_graphs))
+ for graph_id, nodes in matching_graphs)))
class ParallelEvaluatorContext(object):
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/query/result.py
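Note: the list(...) wrappers 2to3 adds around items(), keys(), values() and filter(...) in the hunks above are needed because those calls return lazy views or iterators on Python 3 instead of Python 2's lists. A minimal sketch of the difference, with hypothetical data:

    counts = {"a": 1, "b": 2}

    view = counts.items()            # live view, not a list
    snapshot = list(counts.items())  # explicit snapshot, as 2to3 emits

    counts["c"] = 3
    assert ("c", 3) in view          # the view sees the new entry
    assert ("c", 3) not in snapshot  # the snapshot does not

    lazy = filter(lambda kv: kv[1] > 1, counts.items())
    assert len(list(lazy)) == 2      # iterators must be materialized to count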
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/query/querybuilder.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/tiger/query/querybuilder.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/query/querybuilder.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/query/predicates.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/query/predicates.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/query/predicates.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/query/predicates.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/query/predicates.py (refactored)
@@ -72,17 +72,17 @@
expected_ast_type, mandatory = formal_arg
try:
if invocation_args[idx].TYPE is not expected_ast_type:
- raise PredicateTypeError, (
+ raise PredicateTypeError(
"Type Error in argument %i of '%s': Expected %s, got %s" % \
(idx, name, invocation_args[idx].TYPE, expected_ast_type))
except IndexError:
if mandatory:
- raise PredicateTypeError, "Missing arguments for '%s'." % (name, )
+ raise PredicateTypeError("Missing arguments for '%s'." % (name, ))
else:
break
else:
if idx + 1 != len(invocation_args):
- raise PredicateTypeError, "Too many arguments for predicate '%s'." % (name, )
+ raise PredicateTypeError("Too many arguments for predicate '%s'." % (name, ))
variable = invocation_args[0].variable
if variable.container not in cls.__ref_types__:
raise PredicateTypeError("Predicate '%s' not valid for container type '%s'." % (
@@ -273,7 +273,7 @@
def raise_error(self, pred_name):
"""Raises an `UndefinedNameError` for unknown predicates."""
- raise UndefinedNameError, (UndefinedNameError.PREDICATE, pred_name)
+ raise UndefinedNameError(UndefinedNameError.PREDICATE, pred_name)
def _create_instance(self, cls, pred_ast):
"""Creates a new predicate using the class factory method."""
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/query/nodesearcher.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/query/nodesearcher.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/query/nodesearcher.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/query/nodesearcher.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/query/nodesearcher.py (refactored)
@@ -83,10 +83,10 @@
self._length = length
def __iter__(self):
- return ((0, graph_id) for graph_id in xrange(self._length))
-
-
- get_temp_table = ("_temp_regex_table_%i_%i" % (os.getpid(), c) for c in count()).next
+ return ((0, graph_id) for graph_id in range(self._length))
+
+
+ get_temp_table = ("_temp_regex_table_%i_%i" % (os.getpid(), c) for c in count()).__next__
MATCH = True
NO_MATCH = False
@@ -146,8 +146,7 @@
for literal in literals:
if literal.TYPE is ast.StringLiteral:
if has_explicit_match:
- raise ConflictError, \
- "Feature '%s' has two conflicting constraints." % (feature_name, )
+ raise ConflictError("Feature '%s' has two conflicting constraints." % (feature_name, ))
has_explicit_match = True
@@ -169,7 +168,7 @@
def cleanup_temporary_tables(self):
"""Drops all temporary tables created for regex matches."""
- for table_name in self._temp_tables.itervalues():
+ for table_name in self._temp_tables.values():
self._db.execute("DROP TABLE %s" % (table_name, ))
self._temp_tables = {}
@@ -333,7 +332,7 @@
def _get_set_predicates(self):
"""Returns a list `(var_name, predicate)` containing all constraints on node sets."""
return [(name, pred)
- for name, node_predicates in self._predicates.iteritems()
+ for name, node_predicates in self._predicates.items()
if name.is_set
for pred in node_predicates
if not pred.FOR_NODE]
@@ -371,7 +370,7 @@
def _read_tips(self):
for node_variable, node_iter, tips in self._node_cursors:
try:
- tips[node_variable] = node_iter.next()
+ tips[node_variable] = next(node_iter)
except StopIteration:
if self._remove_iter(node_variable, node_iter):
raise EmptyResultException
@@ -381,14 +380,14 @@
for node_var in self._node_vars:
current_graph[node_var] = []
- for target, source in self._shared_variables.iteritems():
+ for target, source in self._shared_variables.items():
current_graph[target] = current_graph[source]
return current_graph
@staticmethod
def _dump_nodes(from_iter, current_tip, next_graph):
while current_tip[1] < next_graph:
- current_tip = from_iter.next()
+ current_tip = next(from_iter)
return current_tip
def _find_graphs(self):
@@ -411,7 +410,7 @@
while current_tip[1] == min_graphid:
node_list.append(current_tip[0])
- current_tip = node_iter.next()
+ current_tip = next(node_iter)
tips[varname] = current_tip
if not varname.is_set and current_tip[1] > max_graphid:
max_graphid = current_tip[1]
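Note: Python 3 renames the iterator method next() to __next__() and routes calls through the next() builtin, hence node_iter.next() becoming next(node_iter) and the bound-method table-name factory becoming .__next__ above. Both spellings in a short sketch:

    import os
    from itertools import count

    it = iter([10, 20])
    assert next(it) == 10    # builtin next() replaces it.next()

    # The generator-as-name-factory idiom from nodesearcher.py: binding the
    # generator's __next__ method yields a zero-argument callable.
    get_temp_table = ("_temp_regex_table_%i_%i" % (os.getpid(), c)
                      for c in count()).__next__
    assert get_temp_table().endswith("_0")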
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/query/node_variable.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/query/node_variable.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/query/node_variable.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/query/node_variable.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/query/node_variable.py (refactored)
@@ -55,7 +55,7 @@
elif self.var_type is NodeType.UNKNOWN:
self.var_type = new_type
else:
- raise TigerTypeError, self._name
+ raise TigerTypeError(self._name)
name = property(attrgetter("_name"), doc = "The name of the variable")
is_set = property(attrgetter("_is_set"),
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/query/factory.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/query/factory.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/query/factory.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/query/factory.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/query/factory.py (refactored)
@@ -51,7 +51,7 @@
try:
self._types.add(self._type_assoc[node.feature])
except KeyError:
- raise UndefinedNameError, (UndefinedNameError.FEATURE, node.feature)
+ raise UndefinedNameError(UndefinedNameError.FEATURE, node.feature)
return self.STOP
@ast_visitor.node_handler(ast.FeatureRecord)
@@ -72,7 +72,7 @@
for disj in self._disjoints:
if len(disj) == 2:
- raise TigerTypeError, node_variable.name
+ raise TigerTypeError(node_variable.name)
else:
node_var_type.update(disj)
@@ -94,7 +94,7 @@
Anonymous node descriptions will be wrapped into a variable definition with an
automatically generated, globally unique variable name.
"""
- get_anon_nodevar = (":anon:%i" % (c, ) for c in count()).next
+ get_anon_nodevar = (":anon:%i" % (c, ) for c in count()).__next__
constraint_factory = ConstraintFactory()
predicate_factory = PredicateFactory()
@@ -220,7 +220,7 @@
This mechanism is different from handling of feature records. The type predicate
is added to each disjunct, while the feature record can differ between each disjunct.
"""
- for node_variable, description in self.node_defs.iteritems():
+ for node_variable, description in self.node_defs.items():
if description.expression.TYPE is ast.Nop and len(predicates[node_variable]) == 0 \
and node_variable.var_type is not NodeType.UNKNOWN:
predicates[node_variable].append(NodeTypePredicate(node_variable.var_type))
@@ -239,7 +239,7 @@
"""Processes the collected items and returns the query object."""
predicates = defaultdict(list)
- for node_variable, node_desc in self.node_defs.iteritems():
+ for node_variable, node_desc in self.node_defs.items():
self.nodedesc_normalizer.run(node_desc)
node_var_type, has_frec = self._ntyper.run(node_desc, node_variable)
node_variable.refine_type(node_var_type)
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/query/exceptions.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/tiger/query/exceptions.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/query/exceptions.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/query/evaluator.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/tiger/query/evaluator.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/query/evaluator.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/query/constraints.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/query/constraints.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/query/constraints.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/query/constraints.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/query/constraints.py (refactored)
@@ -7,7 +7,7 @@
The code and the interfaces of this module are still subject to change. Please refer to the
inline comments for more information.
"""
-from __future__ import with_statement
+
from nltk_contrib.tiger.query.exceptions import UndefinedNameError
from nltk_contrib.tiger.graph import NodeType
@@ -171,7 +171,7 @@
def guarded(func, exc_type, new_exc_factory, *args, **kwargs):
try:
return func(*args, **kwargs)
- except exc_type, e:
+ except exc_type as e:
raise new_exc_factory(e)
from contextlib import contextmanager
@@ -180,8 +180,8 @@
def convert_exception(exc_type, new_exc_type, args = lambda exc: exc.args):
try:
yield
- except exc_type, e:
- raise new_exc_type, args(e)
+ except exc_type as e:
+ raise new_exc_type(args(e))
def _get_label_id(label, dct, domain):
with convert_exception(KeyError, UndefinedNameError, lambda exc: (domain, exc.args[0])):
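Note: except exc_type, e is a SyntaxError on Python 3, so the fixer rewrites it to except exc_type as e. One subtlety it cannot repair: in Python 2, raise E, t with a tuple t spread the tuple into the constructor, while the mechanical rewrite new_exc_type(args(e)) passes the whole tuple as a single argument. A hedged sketch of convert_exception that unpacks explicitly and adds Python 3 exception chaining (neither is part of the 2to3 output):

    from contextlib import contextmanager

    class UndefinedNameError(Exception):   # stand-in for the real class
        pass

    @contextmanager
    def convert_exception(exc_type, new_exc_type, args=lambda exc: exc.args):
        try:
            yield
        except exc_type as e:
            # *args(e) restores Python 2's tuple spreading; "from e" keeps
            # the original exception reachable as __cause__.
            raise new_exc_type(*args(e)) from e

    try:
        with convert_exception(KeyError, UndefinedNameError):
            {}["missing"]
    except UndefinedNameError as err:
        assert isinstance(err.__cause__, KeyError)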
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/query/ast_visitor.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/query/ast_visitor.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/query/ast_visitor.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/query/ast_visitor.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/query/ast_visitor.py (refactored)
@@ -38,7 +38,7 @@
def __new__(mcs, classname, bases, class_dict):
switch = {}
post_switch = {}
- for obj in class_dict.itervalues():
+ for obj in class_dict.values():
for n in getattr(obj, "node_types", []):
switch[n] = obj
@@ -50,7 +50,7 @@
return type.__new__(mcs, classname, bases, class_dict)
-class AstVisitor(object):
+class AstVisitor(object, metaclass=AstVisitorType):
"""
The base class for AST visitors.
@@ -79,7 +79,6 @@
.. [#] Changing the visitor code to allow this is easy, but the use case has not come up yet.
"""
- __metaclass__ = AstVisitorType
STOP = (0, None)
CONTINUE = lambda s, n: (1, n)
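Note: the metaclass fixer moves Python 2's __metaclass__ class attribute into the class header, which is why __metaclass__ = AstVisitorType disappears above and the base list gains metaclass=AstVisitorType. A minimal sketch with a stand-in metaclass:

    class AstVisitorType(type):            # stand-in registry metaclass
        def __new__(mcs, classname, bases, class_dict):
            class_dict.setdefault("node_types", ())
            return type.__new__(mcs, classname, bases, class_dict)

    # Python 2:
    #   class AstVisitor(object):
    #       __metaclass__ = AstVisitorType
    # Python 3 (2to3 output):
    class AstVisitor(object, metaclass=AstVisitorType):
        pass

    assert type(AstVisitor) is AstVisitorType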
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/query/ast_utils.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/query/ast_utils.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/query/ast_utils.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/query/ast_utils.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/query/ast_utils.py (refactored)
@@ -85,8 +85,8 @@
# !(pos="ART") === !(T & pos="ART") === !(T) | (pos != "ART")
try:
orig_type = self._feature_types[child_node.expression.feature]
- except KeyError, e:
- raise UndefinedNameError, (UndefinedNameError.FEATURE, e.args[0])
+ except KeyError as e:
+ raise UndefinedNameError(UndefinedNameError.FEATURE, e.args[0])
return self.REPLACE(
ast.Disjunction([
ast.FeatureRecord(~orig_type),
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/query/ast.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/query/ast.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/query/ast.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/query/ast.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/query/ast.py (refactored)
@@ -59,7 +59,7 @@
to replace an existing child node via `set_child`.
"""
from operator import attrgetter
-from itertools import izip
+
from nltk_contrib.tiger.utils.enum import Enum, enum_member
# TODO: figure out how to support slots in class hierarchies. Currently,
@@ -100,14 +100,14 @@
def __new__(cls, *args, **kwargs):
if cls._is_abstract(cls.__name__):
- raise TypeError, "cannot instantiate abstract class '%s'." % (cls, )
+ raise TypeError("cannot instantiate abstract class '%s'." % (cls, ))
else:
return object.__new__(cls)
def __init__(self, *args):
assert len(args) == len(self.__slots__), \
(self.__class__.__name__, args, self.__slots__)
- for name, value in izip(self.__slots__, args):
+ for name, value in zip(self.__slots__, args):
setattr(self, name, value)
@staticmethod
@@ -131,7 +131,7 @@
return False
def __repr__(self):
- return u"%s(%s)" % (self.__class__.__name__,
+ return "%s(%s)" % (self.__class__.__name__,
",".join(repr(getattr(self, v)) for v in self.__slots__))
def __iter__(self):
@@ -160,7 +160,7 @@
`new_child` must be a subclass of `_Node`.
"""
if self.is_leaf():
- raise TypeError, "cannot set children on leaf nodes"
+ raise TypeError("cannot set children on leaf nodes")
else:
assert isinstance(getattr(self, name_tag), _Node)
assert isinstance(new_child, _Node)
@@ -252,7 +252,7 @@
_Node.__init__(self, left_operand, right_operand, modifiers)
def __repr__(self):
- return u"%s(%s, %s, **%s)" % (self.__class__.__name__,
+ return "%s(%s, %s, **%s)" % (self.__class__.__name__,
self.left_operand, self.right_operand, self.modifiers)
@classmethod
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/query/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/tiger/query/__init__.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/query/__init__.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/indexer/tiger_corpus_indexer.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/indexer/tiger_corpus_indexer.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/indexer/tiger_corpus_indexer.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/indexer/tiger_corpus_indexer.py (refactored)
@@ -94,11 +94,11 @@
def _add_index_metadata(self, **kwargs):
self._cursor.executemany("INSERT INTO index_metadata (key, value) VALUES (?, ?)",
- kwargs.iteritems())
+ iter(kwargs.items()))
def set_metadata(self, metadata):
self._cursor.executemany("INSERT INTO corpus_metadata (key, value) VALUES (?, ?)",
- metadata.iteritems())
+ iter(metadata.items()))
def add_feature(self, feature_name, domain, feature_values):
@@ -113,9 +113,9 @@
self._cursor.executemany("INSERT INTO feature_values (feature_id, value_id, value, description) VALUES (?, ?, ?, ?)",
((feature_id, value_map[value], value, description)
- for value, description in feature_values.iteritems()))
+ for value, description in feature_values.items()))
else:
- value_map = defaultdict(count().next)
+ value_map = defaultdict(count().__next__)
self._open_list_features.append((feature_id, value_map))
self._feature_value_maps[feature_name] = (value_map, domain)
@@ -129,13 +129,13 @@
assert DEFAULT_VROOT_EDGE_LABEL in edge_labels, "no neutral edge label"
self._cursor.executemany("INSERT INTO edge_labels (id, label, description) VALUES (?, ?, ?)",
- ((idx, e[0], e[1]) for idx, e in enumerate(edge_labels.iteritems())))
+ ((idx, e[0], e[1]) for idx, e in enumerate(edge_labels.items())))
self._edge_label_map = dict(self._cursor.execute("SELECT label, id FROM edge_labels"))
self._serializer.set_edge_label_map(self._edge_label_map)
def set_secedge_labels(self, secedge_labels):
self._cursor.executemany("INSERT INTO secedge_labels (id, label, description) VALUES (?, ?, ?)",
- ((idx, e[0], e[1]) for idx, e in enumerate(secedge_labels.iteritems())))
+ ((idx, e[0], e[1]) for idx, e in enumerate(secedge_labels.items())))
self._secedge_label_map = dict(self._cursor.execute("SELECT label, id FROM secedge_labels"))
self._serializer.set_secedge_label_map(self._secedge_label_map)
@@ -174,7 +174,7 @@
def _index_feature_values(self, graph, node_ids):
for node in graph:
- for feature_name, feature_value in node.features.iteritems():
+ for feature_name, feature_value in node.features.items():
value_map, domain = self._feature_value_maps[feature_name]
assert node.TYPE is domain
self._insert_lists[feature_name].append((node_ids[node.id].to_int(), value_map[feature_value]))
@@ -192,7 +192,7 @@
for label, target_node in node.secedges))
def _flush_node_feature_values(self):
- for feature_name, values in self._insert_lists.iteritems():
+ for feature_name, values in self._insert_lists.items():
self._cursor.executemany(self._feature_iidx_stmts[feature_name], values)
self._insert_lists = defaultdict(list)
@@ -203,7 +203,7 @@
graph.id = self._graphs
graph.root_id = node_ids[graph.root_id]
- for xml_node_id in graph.nodes.keys():
+ for xml_node_id in list(graph.nodes.keys()):
node = graph.nodes.pop(xml_node_id)
node.id = node_ids[node.id]
graph.nodes[node_ids[xml_node_id]] = node
@@ -215,7 +215,7 @@
def add_graph(self, graph):
try:
roots = graph.get_roots()
- except KeyError, e:
+ except KeyError as e:
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/indexer/tiger_corpus_indexer.py
logging.error("Graph %s is faulty: node %s referenced more than once.",
graph.id, e.args[0])
return
@@ -239,42 +239,42 @@
(self._graphs, xml_id, buffer(self._serializer.serialize_graph(graph))))
self._graphs += 1
if self._progress and self._graphs % 100 == 0:
- print self._graphs
+ print(self._graphs)
def finalize(self, optimize = True):
if self._progress:
- print "finalize"
+ print("finalize")
self._flush_node_feature_values()
if self._progress:
- print "inserting feature values"
+ print("inserting feature values")
for feature_id, feature_value_map in self._open_list_features:
self._cursor.executemany("INSERT INTO feature_values (feature_id, value_id, value) VALUES (?, ?, ?)",
((feature_id, value_id, value)
- for value, value_id in feature_value_map.iteritems()))
+ for value, value_id in feature_value_map.items()))
del self._open_list_features
if self._progress:
- print "Committing database"
+ print("Committing database")
self._db.commit()
self._cursor.execute("CREATE INDEX feature_id_idx ON feature_values (feature_id)")
for feature_name in self._feature_value_maps:
if self._progress:
- print "creating index for feature '%s'" % (feature_name,)
+ print("creating index for feature '%s'" % (feature_name,))
self._cursor.execute("CREATE INDEX %s_iidx_idx ON feature_iidx_%s (value_id)" % (feature_name, feature_name))
if self._progress:
- print "creating index for xml node ids"
+ print("creating index for xml node ids")
self._cursor.execute("CREATE UNIQUE INDEX xml_node_id_idx ON node_data (xml_node_id)")
if self._progress:
- print "creating index for xml graph ids"
+ print("creating index for xml graph ids")
self._cursor.execute("CREATE UNIQUE INDEX xml_graph_id_idx ON graphs (xml_graph_id)")
if self._progress:
- print "creating secedge indices"
+ print("creating secedge indices")
self._cursor.execute("CREATE INDEX se_origin_idx ON secedges (origin_id)")
self._cursor.execute("CREATE INDEX se_target_idx ON secedges (target_id)")
@@ -282,7 +282,7 @@
if optimize:
if self._progress:
- print "Optimizing database"
+ print("Optimizing database")
self._db.execute("VACUUM")
self._add_index_metadata(finished = True)
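Note: the optional buffer fixer is skipped in this run, so the buffer(self._serializer.serialize_graph(graph)) call still visible in the add_graph hunk above will raise NameError on Python 3, where the buffer type is gone. When binding BLOBs with sqlite3, bytes or memoryview (which is what sqlite3.Binary aliases on Python 3) is the usual substitute; a hedged sketch:

    import sqlite3

    db = sqlite3.connect(":memory:")
    db.execute("CREATE TABLE graphs (id INTEGER, xml_graph_id TEXT, data BLOB)")

    serialized = b"\x00\x01\x02"           # hypothetical serializer output
    # Python 2: buffer(serialized); Python 3: memoryview / sqlite3.Binary.
    db.execute("INSERT INTO graphs VALUES (?, ?, ?)",
               (0, "s1", memoryview(serialized)))

    (blob,) = db.execute("SELECT data FROM graphs WHERE id = 0").fetchone()
    assert bytes(blob) == serialized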
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/indexer/graph_serializer.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/indexer/graph_serializer.py
WARNING: couldn't encode ../python3/nltk_contrib/nltk_contrib/tiger/indexer/graph_serializer.py's diff for your terminal
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/indexer/graph_serializer.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/indexer/graph_serializer.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/indexer/graph_serializer.py (refactored)
@@ -2,8 +2,8 @@
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/indexer/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No files need to be modified.
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/index.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/tiger/index.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/index.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/graph.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/graph.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/graph.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/graph.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/graph.py (refactored)
@@ -26,7 +26,7 @@
elif type_key == "N":
return NodeType.NONTERMINAL
else:
- raise ValueError, "Unknown domain key '%s'." % (type_key,)
+ raise ValueError("Unknown domain key '%s'." % (type_key,))
def __invert__(self):
if self is self.__class__.TERMINAL:
@@ -34,7 +34,7 @@
elif self is self.__class__.NONTERMINAL:
return self.__class__.TERMINAL
else:
- raise ValueError, "Cannot invert '%s'." % (self,)
+ raise ValueError("Cannot invert '%s'." % (self,))
class _TigerNode(object):
@@ -121,7 +121,7 @@
self.id = id_
self.edge_label = None
self.gorn_address = None
- for name, value in kwargs.iteritems():
+ for name, value in kwargs.items():
setattr(self, name, value)
@@ -154,13 +154,13 @@
return not (self == other)
def __iter__(self):
- return self.nodes.itervalues()
+ return iter(self.nodes.values())
def terminals(self):
- return (n for n in self.nodes.itervalues() if n.TYPE is NodeType.TERMINAL)
+ return (n for n in self.nodes.values() if n.TYPE is NodeType.TERMINAL)
def nonterminals(self):
- return (n for n in self.nodes.itervalues() if n.TYPE is NodeType.NONTERMINAL)
+ return (n for n in self.nodes.values() if n.TYPE is NodeType.NONTERMINAL)
def copy(self):
g = TigerGraph(self.id)
@@ -189,7 +189,7 @@
self._compute_dominance(terminals, nonterminals)
self._compute_corners(nonterminals)
- return nonterminals.values(), terminals.values()
+ return list(nonterminals.values()), list(terminals.values())
def _compute_corners(self, nonterminals):
def traverse(node_id, node_data):
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/demo.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/demo.py
WARNING: couldn't encode ../python3/nltk_contrib/nltk_contrib/tiger/demo.py's diff for your terminal
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/demo.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/demo.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/demo.py (refactored)
@@ -2,7 +2,7 @@
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/corpus.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/corpus.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/corpus.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/corpus.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/corpus.py (refactored)
@@ -87,11 +87,11 @@
self._evaluator = None
def _get_edge_label_rmap(self):
- return [unicode(r[0])
+ return [str(r[0])
for r in self._cursor.execute("SELECT label FROM edge_labels ORDER BY id")]
def _get_secedge_label_rmap(self):
- return [unicode(r[0])
+ return [str(r[0])
for r in self._cursor.execute("SELECT label FROM secedge_labels ORDER BY id")]
def _get_domain_features(self, domain):
@@ -107,7 +107,7 @@
for r in self._cursor.execute(
"SELECT value FROM feature_values WHERE feature_id = ? ORDER BY value_id",
(row[0],)):
- values.append(unicode(r[0]))
+ values.append(str(r[0]))
return l
def __iter__(self):
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tiger/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tiger/__init__.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tiger/__init__.py
--- ../python3/nltk_contrib/nltk_contrib/tiger/__init__.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tiger/__init__.py (refactored)
@@ -34,7 +34,7 @@
class EmptyDbProvider(object):
def connect(self):
- raise RuntimeError, "cannot reopen memory-only db"
+ raise RuntimeError("cannot reopen memory-only db")
def can_reconnect(self):
return False
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/textgrid.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/textgrid.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/textgrid.py
--- ../python3/nltk_contrib/nltk_contrib/textgrid.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/textgrid.py (refactored)
@@ -150,7 +150,7 @@
for tier in self.tiers:
yield tier
- def next(self):
+ def __next__(self):
if self.idx == (self.size - 1):
raise StopIteration
self.idx += 1
@@ -447,23 +447,23 @@
return self.__repr__() + "\n " + "\n ".join(" ".join(row) for row in self.simple_transcript)
def demo_TextGrid(demo_data):
- print "** Demo of the TextGrid class. **"
+ print("** Demo of the TextGrid class. **")
fid = TextGrid(demo_data)
- print "Tiers:", fid.size
+ print("Tiers:", fid.size)
for i, tier in enumerate(fid):
- print "\n***"
- print "Tier:", i + 1
- print tier
+ print("\n***")
+ print("Tier:", i + 1)
+ print(tier)
def demo():
# Each demo demonstrates different TextGrid formats.
- print "Format 1"
+ print("Format 1")
demo_TextGrid(demo_data1)
- print "\nFormat 2"
+ print("\nFormat 2")
demo_TextGrid(demo_data2)
- print "\nFormat 3"
+ print("\nFormat 3")
demo_TextGrid(demo_data3)
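Note: TextGrid implements the iterator protocol on the class itself, so 2to3 renames its next() method to __next__(); Python 3 only ever calls the dunder. A simplified class-based iterator of the same shape, with hypothetical names:

    class Tiers:
        def __init__(self, tiers):
            self.tiers = tiers
            self.size = len(tiers)
            self.idx = -1

        def __iter__(self):
            return self

        def __next__(self):                # was "def next(self)" on Python 2
            if self.idx == (self.size - 1):
                raise StopIteration
            self.idx += 1
            return self.tiers[self.idx]

    assert list(Tiers(["a", "b"])) == ["a", "b"]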
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tag/tnt.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/tag/tnt.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/tag/tnt.py
--- ../python3/nltk_contrib/nltk_contrib/tag/tnt.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/tag/tnt.py (refactored)
@@ -66,8 +66,8 @@
f = None
try:
if verbose:
- print 'Begin input file creation'
- print 'input_file=%s' % input_file
+ print('Begin input file creation')
+ print('input_file=%s' % input_file)
f = open(input_file, 'w')
words = tokenize.WhitespaceTokenizer().tokenize(sentence)
@@ -75,21 +75,21 @@
f.write('%s\n' % word)
f.write('\n')
f.close()
- if verbose: print 'End input file creation'
+ if verbose: print('End input file creation')
if verbose:
- print 'tnt_bin=%s' % tnt_bin
- print 'model_path=%s' % model_path
- print 'output_file=%s' % output_file
+ print('tnt_bin=%s' % tnt_bin)
+ print('model_path=%s' % model_path)
+ print('output_file=%s' % output_file)
execute_string = execute_string % (tnt_bin, model_path, input_file, output_file)
if verbose:
- print 'execute_string=%s' % execute_string
+ print('execute_string=%s' % execute_string)
- if verbose: print 'Begin tagging'
+ if verbose: print('Begin tagging')
tnt_exit = os.system(execute_string)
- if verbose: print 'End tagging (exit code=%s)' % tnt_exit
+ if verbose: print('End tagging (exit code=%s)' % tnt_exit)
f = open(output_file, 'r')
lines = f.readlines()
@@ -105,7 +105,7 @@
if verbose:
for tag in tagged_words:
- print tag
+ print(tag)
finally:
if f: f.close()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/tag/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No files need to be modified.
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/stringcomp.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/stringcomp.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/stringcomp.py
--- ../python3/nltk_contrib/nltk_contrib/stringcomp.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/stringcomp.py (refactored)
@@ -89,20 +89,20 @@
def demo ():
- print "Comparison between 'python' and 'python': %.2f" % stringcomp("python", "python")
- print "Comparison between 'python' and 'Python': %.2f" % stringcomp("python", "Python")
- print "Comparison between 'NLTK' and 'NTLK': %.2f" % stringcomp("NLTK", "NTLK")
- print "Comparison between 'abc' and 'def': %.2f" % stringcomp("abc", "def")
+ print("Comparison between 'python' and 'python': %.2f" % stringcomp("python", "python"))
+ print("Comparison between 'python' and 'Python': %.2f" % stringcomp("python", "Python"))
+ print("Comparison between 'NLTK' and 'NTLK': %.2f" % stringcomp("NLTK", "NTLK"))
+ print("Comparison between 'abc' and 'def': %.2f" % stringcomp("abc", "def"))
- print "Word most similar to 'australia' in list ['canada', 'brazil', 'egypt', 'thailand', 'austria']:"
+ print("Word most similar to 'australia' in list ['canada', 'brazil', 'egypt', 'thailand', 'austria']:")
max_score = 0.0 ; best_match = None
for country in ["canada", "brazil", "egypt", "thailand", "austria"]:
score = stringcomp("australia", country)
if score > max_score:
best_match = country
max_score = score
- print "(comparison between 'australia' and '%s': %.2f)" % (country, score)
- print "Word most similar to 'australia' is '%s' (score: %.2f)" % (best_match, max_score)
+ print("(comparison between 'australia' and '%s': %.2f)" % (country, score))
+ print("Word most similar to 'australia' is '%s' (score: %.2f)" % (best_match, max_score))
if __name__ == "__main__":
demo()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/seqclass.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/seqclass.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/seqclass.py
--- ../python3/nltk_contrib/nltk_contrib/seqclass.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/seqclass.py (refactored)
@@ -19,7 +19,7 @@
def classify(self, featuresets):
if self.size() == 0:
- raise ValueError, 'Tagger is not trained'
+ raise ValueError('Tagger is not trained')
for i, featureset in enumerate(featuresets):
@@ -91,7 +91,7 @@
stream = open(filename,'w')
yaml.dump_all(training_data, stream)
- print "Saving features to %s" % os.path.abspath(filename)
+ print("Saving features to %s" % os.path.abspath(filename))
stream.close()
@@ -100,7 +100,7 @@
dict_corpus = tabular2dict(training_corpus, KEYS)
contexts = self.contexts(dict_corpus)
- print "Detecting features"
+ print("Detecting features")
training_data = [(self.detect_features(c), c[1]['label']) for c in contexts]
if save:
@@ -118,11 +118,11 @@
Train a classifier.
"""
if self.size() != 0:
- raise ValueError, 'Classifier is already trained'
+ raise ValueError('Classifier is already trained')
training_data = self.corpus2training_data(training_corpus)
- print "Training classifier"
+ print("Training classifier")
self._model = iis(training_data)
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/xmlhandler_unittest.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/scripttranscriber/xmlhandler_unittest.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/xmlhandler_unittest.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/xmlhandler.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/scripttranscriber/xmlhandler.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/xmlhandler.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/unittest.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/unittest.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/unittest.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/unittest.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/unittest.py (refactored)
@@ -41,4 +41,4 @@
def TestUnitOutputs(unitname, gold_file, test_file):
CompareOutputFiles(gold_file, test_file)
- print '%s successful' % unitname
+ print('%s successful' % unitname)
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/tokens.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/tokens.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/tokens.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/tokens.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/tokens.py (refactored)
@@ -200,7 +200,7 @@
except KeyError: return self.InitTokenStats(tok)
def TokenStats(self):
- return self.tokstats_.values()
+ return list(self.tokstats_.values())
def SetN(self, n):
self.n_ = n
@@ -288,7 +288,7 @@
try: map[hash_string].append(token_)
except KeyError: map[hash_string] = [token_]
ntokens = []
- keys = map.keys()
+ keys = list(map.keys())
keys.sort()
for k in keys:
token_ = map[k][0]
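Note: the list(map.keys()) wrapper appears because keys() returns an unsortable view on Python 3; sorted() does the same job in one step, and renaming the map variable would stop it shadowing the builtin. A sketch with hypothetical data:

    groups = {"b2c4": ["tok2"], "a1f3": ["tok1"]}   # hash -> tokens, made up

    keys = list(groups.keys())      # 2to3 output style
    keys.sort()

    assert keys == sorted(groups)   # equivalent, more idiomatic Python 3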
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/token_comp_unittest.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/token_comp_unittest.py
WARNING: couldn't encode ../python3/nltk_contrib/nltk_contrib/scripttranscriber/token_comp_unittest.py's diff for your terminal
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/token_comp_unittest.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/token_comp_unittest.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/token_comp_unittest.py (refactored)
@@ -32,11 +32,11 @@
import auxiliary_comp
from __init__ import BASE_
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/token_comp.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/scripttranscriber/token_comp.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/token_comp.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/thai_unittest.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/thai_unittest.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/thai_unittest.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/thai_unittest.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/thai_unittest.py (refactored)
@@ -85,7 +85,7 @@
comparator.ComputeDistance()
result = comparator.ComparisonResult()
matches[(hash1, hash2)] = result
- values = matches.values()
+ values = list(matches.values())
values.sort(lambda x, y: cmp(x.Cost(), y.Cost()))
p = open(match_file, 'w') ## zero out the file
p.close()
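Note: 2to3 leaves values.sort(lambda x, y: cmp(x.Cost(), y.Cost())) untouched, and on Python 3 it is doubly broken: list.sort() accepts no positional comparison function, and the cmp builtin is gone. A key function, or functools.cmp_to_key for a mechanical port, is the fix; a sketch with a stand-in result class:

    from functools import cmp_to_key

    class Result:                          # stand-in for the comparator results
        def __init__(self, cost):
            self._cost = cost
        def Cost(self):
            return self._cost

    values = [Result(3), Result(1), Result(2)]

    values.sort(key=lambda x: x.Cost())    # preferred on Python 3

    def py2_cmp(a, b):                     # Python 2's cmp(), reconstructed
        return (a > b) - (a < b)
    values.sort(key=cmp_to_key(lambda x, y: py2_cmp(x.Cost(), y.Cost())))

    assert [v.Cost() for v in values] == [1, 2, 3]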
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/thai_extractor.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/thai_extractor.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/thai_extractor.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/thai_extractor.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/thai_extractor.py (refactored)
@@ -63,7 +63,7 @@
return None
def Dump(self, file):
- keys = self.table_.keys()
+ keys = list(self.table_.keys())
keys.sort(lambda x, y: cmp(self.table_[x], self.table_[y]))
p = open(file, 'w')
for k in keys:
@@ -90,7 +90,7 @@
extraction.
"""
list = []
- for u in unicode(text, 'utf8'):
+ for u in str(text, 'utf8'):
list.append(u.encode('utf8'))
return list
@@ -179,7 +179,7 @@
}
self.snow_session_ = snow.SnowSession(snow.MODE_SERVER,
snow_test_args)
- try: utext = unicode(line.strip(), 'utf-8')
+ try: utext = str(line.strip(), 'utf-8')
except TypeError: utext = line.strip()
segments = utext.split()
for segment in segments:
@@ -189,9 +189,8 @@
seglist = Listify(segment.encode('utf8'))
features = []
for i in range(len(seglist)):
- feats = ', '.join(map(lambda x: str(x),
- FeatureExtract(i, seglist,
- self.feature_map_))) + ':\n'
+ feats = ', '.join([str(x) for x in FeatureExtract(i, seglist,
+ self.feature_map_)]) + ':\n'
result = self.snow_session_.evaluateExample(feats)
target, a, b, activation = result.split('\n')[1].split()
target = int(target[:-1]) ## remove ':'
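Note: the unicode fixer turns unicode(text, 'utf-8') into str(text, 'utf-8'), which only decodes bytes input; passing an already-decoded str raises TypeError, which is exactly what the surrounding try/except TypeError relies on. Sketch:

    def to_text(line):
        # str(bytes, 'utf-8') decodes; str input raises TypeError, so the
        # converted code falls back to the stripped string unchanged.
        try:
            return str(line.strip(), 'utf-8')
        except TypeError:
            return line.strip()

    assert to_text(b"\xe0\xb8\x81 ") == "\u0e01"    # Thai ko kai
    assert to_text("already text") == "already text"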
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/sample.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/sample.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/sample.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/sample.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/sample.py (refactored)
@@ -105,7 +105,7 @@
comparator.ComputeDistance()
result = comparator.ComparisonResult()
matches[(hash1, hash2)] = result
- values = matches.values()
+ values = list(matches.values())
values.sort(lambda x, y: cmp(x.Cost(), y.Cost()))
p = open(MATCH_FILE_, 'w') ## zero out the file
p.close()
@@ -130,7 +130,7 @@
comparator.ComputeDistance()
result = comparator.ComparisonResult()
correlates[(hash1, hash2)] = result
- values = correlates.values()
+ values = list(correlates.values())
values.sort(lambda x, y: cmp(x.Cost(), y.Cost()))
p = open(CORR_FILE_, 'w') ## zero out the file
p.close()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/pronouncer_unittest.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/pronouncer_unittest.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/pronouncer_unittest.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/pronouncer_unittest.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/pronouncer_unittest.py (refactored)
@@ -57,9 +57,9 @@
for line in p:
line = line.strip()
word, pron = line.split('\t')
- try: word = unicode(word, 'utf-8')
+ try: word = str(word, 'utf-8')
except TypeError: pass
- try: pron = unicode(pron, 'utf-8')
+ try: pron = str(pron, 'utf-8')
except TypeError: pass
try:
GOLDEN_[word].AddPronunciation(pron)
@@ -73,7 +73,7 @@
if output: file = open(GOLDEN_FILE_, 'w')
else: LoadGolden()
for w in WORDS_:
- try: w = unicode(w.strip(), 'utf-8')
+ try: w = str(w.strip(), 'utf-8')
except TypeError: pass
token_ = tokens.Token(w)
pronouncer_ = pronouncer.UnitranPronouncer(token_)
@@ -89,7 +89,7 @@
file.write('%s\t%s\n' % (pronouncer_.Token().String(), p))
else:
try:
- string = unicode(pronouncer_.Token().String(), 'utf-8')
+ string = str(pronouncer_.Token().String(), 'utf-8')
except TypeError:
string = pronouncer_.Token().String()
assert string in GOLDEN_, \
@@ -107,10 +107,10 @@
nprons[i],
gprons[i])
if output:
- print 'generated %s' % GOLDEN_FILE_
+ print('generated %s' % GOLDEN_FILE_)
file.close()
else:
- print '%s successful' % sys.argv[0]
+ print('%s successful' % sys.argv[0])
if __name__ == '__main__':
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/pronouncer.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/scripttranscriber/pronouncer.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/pronouncer.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron_trainer_unittest.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron_trainer_unittest.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron_trainer_unittest.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron_trainer_unittest.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron_trainer_unittest.py (refactored)
@@ -47,28 +47,28 @@
# train the perceptron
pt.Train(dict[0:1000])
first_run = EvaluateExamples(pt)
- print first_run
+ print(first_run)
# results here should be the same
second_run = EvaluateExamples(pt)
- print second_run
+ print(second_run)
# learn from new examples
# produce new results
pt.Retrain(dict[1001:3000])
third_run = EvaluateExamples(pt)
- print third_run
+ print(third_run)
# this result should be the same as the third run
fourth_run = EvaluateExamples(pt)
- print fourth_run
+ print(fourth_run)
# test
if first_run == second_run and first_run != third_run \
and third_run == fourth_run:
- print 'unittest successful'
+ print('unittest successful')
else:
- print 'unsuccessful'
+ print('unsuccessful')
# clean up
pt.CleanUp()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron_trainer.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron_trainer.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron_trainer.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron_trainer.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron_trainer.py (refactored)
@@ -92,7 +92,7 @@
"""
# if the peceptron is already trained, warn and abort
if self.snow_p_.IsTrained():
- if DEBUG_: print 'Perceptron already trained (use Retrain?)'
+ if DEBUG_: print('Perceptron already trained (use Retrain?)')
return False
for example in pos_examples_list:
@@ -144,7 +144,7 @@
"""
# if the perceptron has not been trained, warn and abort
if not self.snow_p_.IsTrained():
- if DEBUG_: print 'Perceptron is not trained (use Train?)'
+ if DEBUG_: print('Perceptron is not trained (use Train?)')
return False
for example in new_positives:
@@ -193,7 +193,7 @@
Return: a tuple of activated target and activation, in the order as mentioned.
"""
if not self.snow_p_.IsTrained():
- if DEBUG_: print 'Perceptron not trained'
+ if DEBUG_: print('Perceptron not trained')
return False
test_ex = Example(s_token, t_token)
@@ -257,8 +257,7 @@
candidates = set(candidates)
candidates = list(candidates)
- distances = map(lambda x:
- (x, Distance(x.split(), token2.split())), candidates)
+ distances = [(x, Distance(x.split(), token2.split())) for x in candidates]
distances = sorted(distances, lambda x,y: x[1] - y[1])
for new_str in distances[1:5]:
@@ -288,7 +287,7 @@
"""
def __init__(self, l):
self.l_ = l
- self.left_els_ = map(lambda x: x[0], self.l_)
+ self.left_els_ = [x[0] for x in self.l_]
def CreateShuffledList(self):
shuffled_list = []
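Note: the fixer does rewrite map(lambda ...) into list comprehensions, as in the two hunks above, but it cannot repair sorted(distances, lambda x, y: x[1] - y[1]), which passes a comparison function positionally and is a TypeError on Python 3. A sketch with a stand-in distance function:

    candidates = ["a b", "a c", "x y z"]   # hypothetical token strings
    token2 = "a b"

    def Distance(xs, ys):                  # stand-in edit distance
        return abs(len(xs) - len(ys)) + sum(x != y for x, y in zip(xs, ys))

    # 2to3 output: a comprehension instead of map(lambda ...):
    distances = [(x, Distance(x.split(), token2.split())) for x in candidates]

    # sorted(distances, lambda x, y: x[1] - y[1]) fails on Python 3; use key=:
    distances = sorted(distances, key=lambda x: x[1])
    assert distances[0][0] == "a b"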
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/perceptron.py (refactored)
@@ -170,7 +170,7 @@
"""Dump the entire feature map to a file whose name is given as the parameter.
"""
fm_fp = open(feature_map_file, 'w')
- for k, v in self.feature_dic_.iteritems():
+ for k, v in self.feature_dic_.items():
fm_fp.write(k + '\t' + str(v) + '\n')
fm_fp.close()
return True
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/paper_example.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/scripttranscriber/paper_example.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/paper_example.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/morph_unittest.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/scripttranscriber/morph_unittest.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/morph_unittest.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/morph.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/morph.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/morph.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/morph.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/morph.py (refactored)
@@ -86,7 +86,7 @@
def Morphs(self, string):
try: return self.morphs_[string]
- except AttributeError, KeyError: return ''
+ except AttributeError as KeyError: return ''
def LabelDoclist(self):
assert self.initialized_ == True, 'Must Initialize() the analyzer!'
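NOTE: the conversion above is faithful but preserves a likely bug. In
Python 2, 'except AttributeError, KeyError:' catches only AttributeError
and binds the instance to the name KeyError; 2to3 turns that into
'except AttributeError as KeyError:', which means the same thing. If the
intent was to catch both exception types, the hand fix would be (sketch):

    except (AttributeError, KeyError): return ''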
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/miner.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/miner.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/miner.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/miner.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/miner.py (refactored)
@@ -170,7 +170,7 @@
result = comparator_.ComparisonResult()
matches[(hash1, hash2)] = result
did += 1
- values = matches.values()
+ values = list(matches.values())
values.sort(lambda x, y: comp(y.Cost(), x.Cost()))
if pdump:
sys.stderr.write('Dumping comparisons to %s...\n' % pdump)
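NOTE: as in perceptron.py, the cmp-style list.sort() call above survives
2to3 and fails under Python 3, where sort() takes only the keyword
arguments key and reverse. A sketch of the equivalent, assuming comp()
behaves like the old cmp() builtin (so the intent is descending Cost):

    values.sort(key=lambda v: v.Cost(), reverse=True)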
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/makeindex.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/makeindex.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/makeindex.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/makeindex.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/makeindex.py (refactored)
@@ -13,13 +13,13 @@
import sys
-print ''
-print ''
-print 'Pydoc for ScriptTranscriber'
-print ''
-print ''
+print('')
+print('')
+print('Pydoc for ScriptTranscriber')
+print('')
+print('')
for line in sys.stdin.readlines():
html = line.strip()
-    print '%s' % (html, html)
-print ''
-print ''
+    print('%s' % (html, html))
+print('')
+print('')
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/japanese_extractor.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/japanese_extractor.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/japanese_extractor.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/japanese_extractor.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/japanese_extractor.py (refactored)
@@ -31,7 +31,7 @@
"""
def LineSegment(self, line):
- try: utext = unicode(line.strip(), 'utf-8')
+ try: utext = str(line.strip(), 'utf-8')
except TypeError: utext = line.strip()
word = []
for u in utext:
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/filter_unittest.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/scripttranscriber/filter_unittest.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/filter_unittest.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/filter.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/scripttranscriber/filter.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/filter.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/extractor_unittest.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/extractor_unittest.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/extractor_unittest.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/extractor_unittest.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/extractor_unittest.py (refactored)
@@ -64,7 +64,7 @@
'Token %d differs: %s != %s' % (i,
all_tokens[i].String(),
GOLDEN_[i])
- print '%s successful' % sys.argv[0]
+ print('%s successful' % sys.argv[0])
if __name__ == '__main__':
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/extractor.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/extractor.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/extractor.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/extractor.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/extractor.py (refactored)
@@ -66,7 +66,7 @@
## Go 'word' by word to make this more robust to unicode decode
## errors.
for w in line.split():
- try: ulinelist.append(unicode(w, 'utf-8'))
+ try: ulinelist.append(str(w, 'utf-8'))
except UnicodeDecodeError: pass
uline = ' '.join(ulinelist)
clist = []
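NOTE: str(w, 'utf-8') decodes only bytes; if w is already a str (the usual
case once the input is opened in text mode), it raises TypeError, which the
'except UnicodeDecodeError' above does not catch. japanese_extractor.py
guards the same pattern with 'except TypeError'; an explicit sketch of the
equivalent guard here:

    if isinstance(w, bytes):
        try: ulinelist.append(w.decode('utf-8'))
        except UnicodeDecodeError: pass
    else:
        ulinelist.append(w)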
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/documents.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/documents.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/documents.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/documents.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/documents.py (refactored)
@@ -124,7 +124,7 @@
def XmlDump(self, file=None, utf8=False):
if file is None:
- print '%s\n' % (self.XmlEncode(utf8))
+ print('%s\n' % (self.XmlEncode(utf8)))
return
p = open(file, 'w')
p.write('%s\n' % self.XmlEncode(utf8))
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/def_pronouncers.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/scripttranscriber/def_pronouncers.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/def_pronouncers.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/chinese_extractor.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/chinese_extractor.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/chinese_extractor.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/chinese_extractor.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/chinese_extractor.py (refactored)
@@ -1305,7 +1305,7 @@
"""
def LineSegment(self, line):
- try: utext = unicode(line.strip(), 'utf-8')
+ try: utext = str(line.strip(), 'utf-8')
except TypeError: utext = line.strip()
word = []
for u in utext:
@@ -1331,7 +1331,7 @@
"""
def LineSegment(self, line):
- try: utext = unicode(line.strip(), 'utf-8')
+ try: utext = str(line.strip(), 'utf-8')
except TypeError: utext = line.strip()
for i in range(len(utext)):
for k in [4, 3, 2]:
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/auxiliary_comp.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/auxiliary_comp.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/auxiliary_comp.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/auxiliary_comp.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/auxiliary_comp.py (refactored)
@@ -72,7 +72,7 @@
def LookupString(chars, convert=False):
pys = []
- for u in unicode(chars, 'utf8'):
+ for u in str(chars, 'utf8'):
try:
py = PINYIN_TABLE_[u.encode('utf8')]
npy = []
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/alignpairsFST.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/alignpairsFST.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/alignpairsFST.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/alignpairsFST.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/alignpairsFST.py (refactored)
@@ -68,12 +68,12 @@
row_label, costs = line.split(None,1)
if genSymbols: symbols.append(row_label)
if row_label not in symbols:
- print "Error: label (%s) not in defined symbols list" % row_label
+ print("Error: label (%s) not in defined symbols list" % row_label)
sys.exit(1)
rows.append(row_label)
costs = costs.split()
if len(costs) != len(cols):
- print 'Error: wrong number of costs on line %s' % line
+ print('Error: wrong number of costs on line %s' % line)
sys.exit(1)
for c in range(len(costs)):
if costs[c] in ('inf', 'Inf', 'INF'): costs[c] = INF_
@@ -247,7 +247,7 @@
aln1, aln2, cost = AlignFSTs(binph1, binph2, binmatrix, syms)
#aln1 = aln1.replace(EPSILON_, SHORT_EPS_)
#aln2 = aln2.replace(EPSILON_, SHORT_EPS_)
- print '%s\t%s\t%.6f' % (aln1, aln2, cost)
+ print('%s\t%s\t%.6f' % (aln1, aln2, cost))
ret = os.system('rm -f %s' % (binmatrix))
if ret != 0:
sys.stderr.write('Error in rm\'ing matrix\n')
@@ -255,8 +255,8 @@
if infile is not None: infp.close()
def usage(called):
- print '%s -m [-s ]' % (called),
- print '[-i ]'
+ print('%s -m [-s ]' % (called), end=' ')
+ print('[-i ]')
if __name__ == '__main__':
try:
@@ -282,6 +282,6 @@
infile = a
if matfile is None:
usage(sys.argv[0])
- print "Error: must provide a cost-matrix file."
+ print("Error: must provide a cost-matrix file.")
sys.exit(2)
main(matfile, symfile, infile)
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/script.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/script.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/script.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/script.py (refactored)
@@ -19,97 +19,97 @@
""" Return the script of a unicode codepoint, only considering those
codepoints that correspond to characters of a script.
"""
- if u >= u'\u0000' and u <= u'\u007F': return 'Latin'
- elif u >= u'\u0080' and u <= u'\u00FF': return 'Latin'
- elif u >= u'\u0100' and u <= u'\u017F': return 'Latin'
- elif u >= u'\u0180' and u <= u'\u024F': return 'Latin'
- elif u >= u'\u0370' and u <= u'\u03FF': return 'Greek'
- elif u >= u'\u0400' and u <= u'\u04FF': return 'Cyrillic'
- elif u >= u'\u0500' and u <= u'\u052F': return 'Cyrillic'
- elif u >= u'\u0530' and u <= u'\u058F': return 'Armenian'
- elif u >= u'\u0590' and u <= u'\u05FF': return 'Hebrew'
- elif u >= u'\u0600' and u <= u'\u06FF': return 'Arabic'
- elif u >= u'\u0700' and u <= u'\u074F': return 'Syriac'
- elif u >= u'\u0750' and u <= u'\u077F': return 'Arabic'
- elif u >= u'\u0780' and u <= u'\u07BF': return 'Thaana'
- elif u >= u'\u0900' and u <= u'\u097F': return 'Devanagari'
- elif u >= u'\u0980' and u <= u'\u09FF': return 'Bengali'
- elif u >= u'\u0A00' and u <= u'\u0A7F': return 'Gurmukhi'
- elif u >= u'\u0A80' and u <= u'\u0AFF': return 'Gujarati'
- elif u >= u'\u0B00' and u <= u'\u0B7F': return 'Oriya'
- elif u >= u'\u0B80' and u <= u'\u0BFF': return 'Tamil'
- elif u >= u'\u0C00' and u <= u'\u0C7F': return 'Telugu'
- elif u >= u'\u0C80' and u <= u'\u0CFF': return 'Kannada'
- elif u >= u'\u0D00' and u <= u'\u0D7F': return 'Malayalam'
- elif u >= u'\u0D80' and u <= u'\u0DFF': return 'Sinhala'
- elif u >= u'\u0E00' and u <= u'\u0E7F': return 'Thai'
- elif u >= u'\u0E80' and u <= u'\u0EFF': return 'Lao'
- elif u >= u'\u0F00' and u <= u'\u0FFF': return 'Tibetan'
- elif u >= u'\u1000' and u <= u'\u109F': return 'Burmese'
- elif u >= u'\u10A0' and u <= u'\u10FF': return 'Georgian'
- elif u >= u'\u1100' and u <= u'\u11FF': return 'Hangul'
- elif u >= u'\u1200' and u <= u'\u137F': return 'Ethiopic'
- elif u >= u'\u1380' and u <= u'\u139F': return 'Ethiopic'
- elif u >= u'\u13A0' and u <= u'\u13FF': return 'Cherokee'
- elif u >= u'\u1400' and u <= u'\u167F': return 'UCS'
- elif u >= u'\u1680' and u <= u'\u169F': return 'Ogham'
- elif u >= u'\u16A0' and u <= u'\u16FF': return 'Runic'
- elif u >= u'\u1700' and u <= u'\u171F': return 'Tagalog'
- elif u >= u'\u1720' and u <= u'\u173F': return 'Hanunoo'
- elif u >= u'\u1740' and u <= u'\u175F': return 'Buhid'
- elif u >= u'\u1760' and u <= u'\u177F': return 'Tagbanwa'
- elif u >= u'\u1780' and u <= u'\u17FF': return 'Khmer'
- elif u >= u'\u1800' and u <= u'\u18AF': return 'Mongolian'
- elif u >= u'\u1900' and u <= u'\u194F': return 'Limbu'
- elif u >= u'\u1950' and u <= u'\u197F': return 'Tai Le'
- elif u >= u'\u1980' and u <= u'\u19DF': return 'New Tai Lue'
- elif u >= u'\u19E0' and u <= u'\u19FF': return 'Khmer'
- elif u >= u'\u1A00' and u <= u'\u1A1F': return 'Buginese'
- elif u >= u'\u1E00' and u <= u'\u1EFF': return 'Latin'
- elif u >= u'\u1F00' and u <= u'\u1FFF': return 'Greek'
- elif u >= u'\u2C00' and u <= u'\u2C5F': return 'Glagolitic'
- elif u >= u'\u2C80' and u <= u'\u2CFF': return 'Coptic'
- elif u >= u'\u2D00' and u <= u'\u2D2F': return 'Georgian'
- elif u >= u'\u2D30' and u <= u'\u2D7F': return 'Tifinagh'
- elif u >= u'\u2D80' and u <= u'\u2DDF': return 'Ethiopic'
- elif u >= u'\u2E80' and u <= u'\u2EFF': return 'CJK'
- elif u >= u'\u2F00' and u <= u'\u2FDF': return 'Kangxi Radicals'
- elif u >= u'\u3040' and u <= u'\u309F': return 'Hiragana'
- elif u >= u'\u30A0' and u <= u'\u30FF': return 'Katakana'
- elif u >= u'\u3100' and u <= u'\u312F': return 'Bopomofo'
- elif u >= u'\u3130' and u <= u'\u318F': return 'Hangul'
- elif u >= u'\u3190' and u <= u'\u319F': return 'Kanbun'
- elif u >= u'\u31A0' and u <= u'\u31BF': return 'Bopomofo'
- elif u >= u'\u31F0' and u <= u'\u31FF': return 'Katakana'
- elif u >= u'\u3300' and u <= u'\u33FF': return 'CJK'
- elif u >= u'\u3400' and u <= u'\u4DBF': return 'CJK'
- elif u >= u'\u4E00' and u <= u'\u9FFF': return 'CJK'
- elif u >= u'\uA000' and u <= u'\uA48F': return 'Yi'
- elif u >= u'\uA490' and u <= u'\uA4CF': return 'Yi'
- elif u >= u'\uA800' and u <= u'\uA82F': return 'Syloti Nagri'
- elif u >= u'\uAC00' and u <= u'\uD7AF': return 'Hangul'
- elif u >= u'\uF900' and u <= u'\uFAFF': return 'CJK'
- elif u >= u'\uFE30' and u <= u'\uFE4F': return 'CJK'
- elif u >= u'\uFE70' and u <= u'\uFEFF': return 'Arabic'
- elif u >= u'\u10000' and u <= u'\u1007F': return 'Linear B'
- elif u >= u'\u10080' and u <= u'\u100FF': return 'Linear B'
- elif u >= u'\u10300' and u <= u'\u1032F': return 'Old Italic'
- elif u >= u'\u10330' and u <= u'\u1034F': return 'Gothic'
- elif u >= u'\u10380' and u <= u'\u1039F': return 'Ugaritic'
- elif u >= u'\u103A0' and u <= u'\u103DF': return 'Old Persian'
- elif u >= u'\u10400' and u <= u'\u1044F': return 'Deseret'
- elif u >= u'\u10450' and u <= u'\u1047F': return 'Shavian'
- elif u >= u'\u10480' and u <= u'\u104AF': return 'Osmanya'
- elif u >= u'\u10800' and u <= u'\u1083F': return 'Cypriot Syllabary'
- elif u >= u'\u10A00' and u <= u'\u10A5F': return 'Kharoshthi'
- elif u >= u'\u20000' and u <= u'\u2A6DF': return 'CJK'
- elif u >= u'\u2F800' and u <= u'\u2FA1F': return 'CJK'
+ if u >= '\u0000' and u <= '\u007F': return 'Latin'
+ elif u >= '\u0080' and u <= '\u00FF': return 'Latin'
+ elif u >= '\u0100' and u <= '\u017F': return 'Latin'
+ elif u >= '\u0180' and u <= '\u024F': return 'Latin'
+ elif u >= '\u0370' and u <= '\u03FF': return 'Greek'
+ elif u >= '\u0400' and u <= '\u04FF': return 'Cyrillic'
+ elif u >= '\u0500' and u <= '\u052F': return 'Cyrillic'
+ elif u >= '\u0530' and u <= '\u058F': return 'Armenian'
+ elif u >= '\u0590' and u <= '\u05FF': return 'Hebrew'
+ elif u >= '\u0600' and u <= '\u06FF': return 'Arabic'
+ elif u >= '\u0700' and u <= '\u074F': return 'Syriac'
+ elif u >= '\u0750' and u <= '\u077F': return 'Arabic'
+ elif u >= '\u0780' and u <= '\u07BF': return 'Thaana'
+ elif u >= '\u0900' and u <= '\u097F': return 'Devanagari'
+ elif u >= '\u0980' and u <= '\u09FF': return 'Bengali'
+ elif u >= '\u0A00' and u <= '\u0A7F': return 'Gurmukhi'
+ elif u >= '\u0A80' and u <= '\u0AFF': return 'Gujarati'
+ elif u >= '\u0B00' and u <= '\u0B7F': return 'Oriya'
+ elif u >= '\u0B80' and u <= '\u0BFF': return 'Tamil'
+ elif u >= '\u0C00' and u <= '\u0C7F': return 'Telugu'
+ elif u >= '\u0C80' and u <= '\u0CFF': return 'Kannada'
+ elif u >= '\u0D00' and u <= '\u0D7F': return 'Malayalam'
+ elif u >= '\u0D80' and u <= '\u0DFF': return 'Sinhala'
+ elif u >= '\u0E00' and u <= '\u0E7F': return 'Thai'
+ elif u >= '\u0E80' and u <= '\u0EFF': return 'Lao'
+ elif u >= '\u0F00' and u <= '\u0FFF': return 'Tibetan'
+ elif u >= '\u1000' and u <= '\u109F': return 'Burmese'
+ elif u >= '\u10A0' and u <= '\u10FF': return 'Georgian'
+ elif u >= '\u1100' and u <= '\u11FF': return 'Hangul'
+ elif u >= '\u1200' and u <= '\u137F': return 'Ethiopic'
+ elif u >= '\u1380' and u <= '\u139F': return 'Ethiopic'
+ elif u >= '\u13A0' and u <= '\u13FF': return 'Cherokee'
+ elif u >= '\u1400' and u <= '\u167F': return 'UCS'
+ elif u >= '\u1680' and u <= '\u169F': return 'Ogham'
+ elif u >= '\u16A0' and u <= '\u16FF': return 'Runic'
+ elif u >= '\u1700' and u <= '\u171F': return 'Tagalog'
+ elif u >= '\u1720' and u <= '\u173F': return 'Hanunoo'
+ elif u >= '\u1740' and u <= '\u175F': return 'Buhid'
+ elif u >= '\u1760' and u <= '\u177F': return 'Tagbanwa'
+ elif u >= '\u1780' and u <= '\u17FF': return 'Khmer'
+ elif u >= '\u1800' and u <= '\u18AF': return 'Mongolian'
+ elif u >= '\u1900' and u <= '\u194F': return 'Limbu'
+ elif u >= '\u1950' and u <= '\u197F': return 'Tai Le'
+ elif u >= '\u1980' and u <= '\u19DF': return 'New Tai Lue'
+ elif u >= '\u19E0' and u <= '\u19FF': return 'Khmer'
+ elif u >= '\u1A00' and u <= '\u1A1F': return 'Buginese'
+ elif u >= '\u1E00' and u <= '\u1EFF': return 'Latin'
+ elif u >= '\u1F00' and u <= '\u1FFF': return 'Greek'
+    elif u >= '\u2C00' and u <= '\u2C5F': return 'Glagolitic'
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/script.py
+ elif u >= '\u2C80' and u <= '\u2CFF': return 'Coptic'
+ elif u >= '\u2D00' and u <= '\u2D2F': return 'Georgian'
+ elif u >= '\u2D30' and u <= '\u2D7F': return 'Tifinagh'
+ elif u >= '\u2D80' and u <= '\u2DDF': return 'Ethiopic'
+ elif u >= '\u2E80' and u <= '\u2EFF': return 'CJK'
+ elif u >= '\u2F00' and u <= '\u2FDF': return 'Kangxi Radicals'
+ elif u >= '\u3040' and u <= '\u309F': return 'Hiragana'
+ elif u >= '\u30A0' and u <= '\u30FF': return 'Katakana'
+ elif u >= '\u3100' and u <= '\u312F': return 'Bopomofo'
+ elif u >= '\u3130' and u <= '\u318F': return 'Hangul'
+ elif u >= '\u3190' and u <= '\u319F': return 'Kanbun'
+ elif u >= '\u31A0' and u <= '\u31BF': return 'Bopomofo'
+ elif u >= '\u31F0' and u <= '\u31FF': return 'Katakana'
+ elif u >= '\u3300' and u <= '\u33FF': return 'CJK'
+ elif u >= '\u3400' and u <= '\u4DBF': return 'CJK'
+ elif u >= '\u4E00' and u <= '\u9FFF': return 'CJK'
+ elif u >= '\uA000' and u <= '\uA48F': return 'Yi'
+ elif u >= '\uA490' and u <= '\uA4CF': return 'Yi'
+ elif u >= '\uA800' and u <= '\uA82F': return 'Syloti Nagri'
+ elif u >= '\uAC00' and u <= '\uD7AF': return 'Hangul'
+ elif u >= '\uF900' and u <= '\uFAFF': return 'CJK'
+ elif u >= '\uFE30' and u <= '\uFE4F': return 'CJK'
+ elif u >= '\uFE70' and u <= '\uFEFF': return 'Arabic'
+ elif u >= '\u10000' and u <= '\u1007F': return 'Linear B'
+ elif u >= '\u10080' and u <= '\u100FF': return 'Linear B'
+ elif u >= '\u10300' and u <= '\u1032F': return 'Old Italic'
+ elif u >= '\u10330' and u <= '\u1034F': return 'Gothic'
+ elif u >= '\u10380' and u <= '\u1039F': return 'Ugaritic'
+ elif u >= '\u103A0' and u <= '\u103DF': return 'Old Persian'
+ elif u >= '\u10400' and u <= '\u1044F': return 'Deseret'
+ elif u >= '\u10450' and u <= '\u1047F': return 'Shavian'
+ elif u >= '\u10480' and u <= '\u104AF': return 'Osmanya'
+ elif u >= '\u10800' and u <= '\u1083F': return 'Cypriot Syllabary'
+ elif u >= '\u10A00' and u <= '\u10A5F': return 'Kharoshthi'
+ elif u >= '\u20000' and u <= '\u2A6DF': return 'CJK'
+ elif u >= '\u2F800' and u <= '\u2FA1F': return 'CJK'
else: return UNKNOWN_SCRIPT_
def StringToScript(string, encoding='utf8'):
stats = {}
- try: ustring = unicode(string, encoding)
+ try: ustring = str(string, encoding)
except TypeError: ustring = string
for u in ustring:
if u.isspace(): continue
@@ -126,13 +126,13 @@
def Lower(string, encoding='utf8'):
try:
- return unicode(string, encoding).lower().encode(encoding)
+ return str(string, encoding).lower().encode(encoding)
except TypeError:
return string.lower().encode(encoding)
def Upper(string, encoding='utf8'):
- return unicode(string, encoding).upper().encode(encoding)
+ return str(string, encoding).upper().encode(encoding)
def SupportsCapitalization(string, encoding='utf8'):
@@ -140,7 +140,7 @@
def IsCapitalized(string, encoding='utf8'):
- try: ustring = unicode(string, encoding)
+ try: ustring = str(string, encoding)
except TypeError: ustring = string
if ustring.lower()[0] == ustring[0]:
return False
@@ -148,7 +148,7 @@
def IsPunctuation(character, encoding='utf-8'):
- try: uchar = unicode(character, encoding)
+ try: uchar = str(character, encoding)
except TypeError: uchar = character
return unicodedata.category(uchar)[:1] == 'P'
@@ -159,7 +159,7 @@
def HasPunctuation(word, encoding='utf-8'):
haspunctuation = False
- try: uword = unicode(word, encoding)
+ try: uword = str(word, encoding)
except TypeError: uword = word
for uchar in uword:
if unicodedata.category(uchar)[:1] == 'P':
@@ -170,7 +170,7 @@
def HasDigit(word, encoding='utf-8'):
hasdigit = False
- try: uword = unicode(word, encoding)
+ try: uword = str(word, encoding)
except TypeError: uword = word
for uchar in uword:
if unicodedata.category(uchar) == 'Nd':
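NOTE: the converted range chain still carries a latent bug: '\u10000' is
not a single supplementary-plane character but the two-character string
'\u1000' + '0', because \u takes exactly four hex digits; those last
ranges need \U00010000-style escapes. The chain could also be collapsed
into a table lookup. A sketch (hypothetical helper, abbreviated table):

    import bisect
    RANGES = [  # (start, end, script), sorted by start codepoint
        (0x0000, 0x024F, 'Latin'),
        (0x0370, 0x03FF, 'Greek'),
        (0x10000, 0x100FF, 'Linear B'),
    ]
    STARTS = [r[0] for r in RANGES]

    def CharToScript(u):
        cp = ord(u)
        i = bisect.bisect_right(STARTS, cp) - 1
        if 0 <= i and cp <= RANGES[i][1]:
            return RANGES[i][2]
        return UNKNOWN_SCRIPT_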
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/latin.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/latin.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/latin.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/latin.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/latin.py (refactored)
@@ -21,7 +21,7 @@
def LatinToWorldBet(string):
output = []
some_success = False
- for c in unicode(string, 'utf8'):
+ for c in str(string, 'utf8'):
c = c.encode('utf-8')
try:
output.append(LATIN_[c])
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/kunyomi_new.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/kunyomi_new.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/kunyomi_new.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/kunyomi_new.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/kunyomi_new.py (refactored)
@@ -41,7 +41,7 @@
output = []
some_success = False
internal = False
- for c in unicode(string, 'utf8'):
+ for c in str(string, 'utf8'):
c = c.encode('utf-8')
try:
pron = KUNYOMI_[c]
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/kunyomi.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/kunyomi.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/kunyomi.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/kunyomi.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/kunyomi.py (refactored)
@@ -23,7 +23,7 @@
def KanjiToWorldBet(string):
output = []
some_success = False
- for c in unicode(string, 'utf8'):
+ for c in str(string, 'utf8'):
c = c.encode('utf-8')
try:
output.append(KUNYOMI_[c])
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/english.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/english.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/english.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/chinese.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/chinese.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/chinese.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/chinese.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Utils/chinese.py (refactored)
@@ -23,7 +23,7 @@
def HanziToWorldBet(string):
output = []
some_success = False
- for c in unicode(string, 'utf8'):
+ for c in str(string, 'utf8'):
c = c.encode('utf-8')
try:
output.append(MANDARIN_[c])
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/unitran.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/unitran.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/unitran.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/unitran.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/unitran.py (refactored)
@@ -41,7 +41,7 @@
p = open(file)
for line in p.readlines():
line = line.split('\n')
- unis = "u'\u%s'"%line[0]
+ unis = "u'\\u%s'"%line[0]
uni = eval(unis)
Indic.append(uni)
return Indic
@@ -54,9 +54,9 @@
p = open(file)
for line in p.readlines():
line = line.split('\t')
- prev = "u'\u%s'"%line[0]
- cur = "u'\u%s'"%line[1]
- comp = "u'\u%s'"%line[2].strip('\n')
+ prev = "u'\\u%s'"%line[0]
+ cur = "u'\\u%s'"%line[1]
+ comp = "u'\\u%s'"%line[2].strip('\n')
pre = eval(prev)
curr = eval(cur)
comps = eval(comp)
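NOTE: 2to3 only escapes the backslash here so the file parses under
Python 3 (a bare "\u%s" would be a truncated-escape SyntaxError); the
build-a-literal-then-eval idiom itself remains fragile. Since each field
is a bare hex codepoint, the direct equivalent is chr() on the parsed
integer (sketch):

    uni = chr(int(line[0], 16))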
@@ -81,7 +81,7 @@
new = ''
prev = None
token = thaifix.ThaiFix(token)
- try: utoken = unicode(token.strip() + ' ', 'utf8')
+ try: utoken = str(token.strip() + ' ', 'utf8')
except UnicodeDecodeError: return token
for c in utoken:
if prev:
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/thaifix.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/thaifix.py
WARNING: couldn't encode ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/thaifix.py's diff for your terminal
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/thaifix.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/thaifix.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/thaifix.py (refactored)
@@ -25,78 +25,78 @@
"""
ThaiTable = {
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/mk_sampa_table.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/mk_sampa_table.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/mk_sampa_table.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/mk_sampa_table.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/mk_sampa_table.py (refactored)
@@ -34,7 +34,7 @@
p = open('X_Tables.py', 'w')
p.write('# coding=utf-8\n')
p.write('TransTable = {\n')
- keys = newTable.keys()
+ keys = list(newTable.keys())
keys.sort()
for u in keys:
xstring, utf = newTable[u]
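NOTE: the list()-then-sort() pair that 2to3 produces is correct; the more
idiomatic Python 3 form iterates the sorted keys directly (sketch):

    for u in sorted(newTable):
        xstring, utf = newTable[u]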
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/Wb2Xs.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/Wb2Xs.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/Wb2Xs.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/Utils/gentable.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/Utils/gentable.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/Utils/gentable.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/Utils/gentable.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/Unitran/Utils/gentable.py (refactored)
@@ -27,9 +27,9 @@
sys.stdout = open('Tables.py', 'w')
-print '# coding=utf-8'
+print('# coding=utf-8')
-print "TransTable = {"
+print("TransTable = {")
for line in sys.stdin.readlines():
if not '#' in line:
@@ -37,8 +37,8 @@
if len(line[0]) > 1:
if len(line) == 1: worldbet = '(##)'
else: worldbet = line[1]
- unistring = "u'\u%s'" % line[0]
+ unistring = "u'\\u%s'" % line[0]
uni = eval(unistring)
utf8 = uni.encode('utf8')
- print " %s : ['%s','%s']," % (unistring, worldbet, utf8)
-print " }"
+ print(" %s : ['%s','%s']," % (unistring, worldbet, utf8))
+print(" }")
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/scripttranscriber/MinEditDist/mEdit.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/scripttranscriber/MinEditDist/mEdit.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/scripttranscriber/MinEditDist/mEdit.py
--- ../python3/nltk_contrib/nltk_contrib/scripttranscriber/MinEditDist/mEdit.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/scripttranscriber/MinEditDist/mEdit.py (refactored)
@@ -52,7 +52,7 @@
## List of all features
-featList = FCDic.keys()
+featList = list(FCDic.keys())
LClist = ['L', 'C', 'D']
PClist = ['C', 'D']
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/rte/logicentail.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/rte/logicentail.py
--- ../python3/nltk_contrib/nltk_contrib/rte/logicentail.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/rte/logicentail.py (refactored)
@@ -40,13 +40,13 @@
if text_drs_list:
text_ex = text_drs_list[0].simplify().toFol()
else:
- if verbose: print 'ERROR: No readings were generated for the Text'
+ if verbose: print('ERROR: No readings were generated for the Text')
hyp_drs_list = glueclass.parse_to_meaning(hyp)
if hyp_drs_list:
hyp_ex = hyp_drs_list[0].simplify().toFol()
else:
- if verbose: print 'ERROR: No readings were generated for the Hypothesis'
+ if verbose: print('ERROR: No readings were generated for the Hypothesis')
#1. proof T -> H
#2. proof (BK & T) -> H
@@ -56,27 +56,27 @@
#6. satisfy BK & T & H
result = inference.Prover9().prove(hyp_ex, [text_ex])
- if verbose: print 'prove: T -> H: %s' % result
+ if verbose: print('prove: T -> H: %s' % result)
if not result:
bk = self._generate_BK(text, hyp, verbose)
bk_exs = [bk_pair[0] for bk_pair in bk]
if verbose:
- print 'Generated Background Knowledge:'
+ print('Generated Background Knowledge:')
for bk_ex in bk_exs:
- print bk_ex
+ print(bk_ex)
result = inference.Prover9().prove(hyp_ex, [text_ex]+bk_exs)
- if verbose: print 'prove: (T & BK) -> H: %s' % result
+ if verbose: print('prove: (T & BK) -> H: %s' % result)
if not result:
consistent = self.check_consistency(bk_exs+[text_ex])
- if verbose: print 'consistency check: (BK & T): %s' % consistent
+ if verbose: print('consistency check: (BK & T): %s' % consistent)
if consistent:
consistent = self.check_consistency(bk_exs+[text_ex, hyp_ex])
- if verbose: print 'consistency check: (BK & T & H): %s' % consistent
+ if verbose: print('consistency check: (BK & T & H): %s' % consistent)
return result
@@ -98,8 +98,8 @@
hypbow = set(word.lower() for word in hyp)
if verbose:
- print 'textbow: %s' % textbow
- print 'hypbow: %s' % hypbow
+ print('textbow: %s' % textbow)
+ print('hypbow: %s' % hypbow)
if self.stop:
textbow = textbow - self.stopwords
@@ -225,9 +225,9 @@
tagger = RTEInferenceTagger()
text = 'John see a car'
- print 'Text: ', text
+ print('Text: ', text)
hyp = 'John watch an auto'
- print 'Hyp: ', hyp
+ print('Hyp: ', hyp)
# text_ex = LogicParser().parse('exists e x y.(david(x) & own(e) & subj(e,x) & obj(e,y) & car(y))')
# hyp_ex = LogicParser().parse('exists e x y.(david(x) & have(e) & subj(e,x) & obj(e,y) & auto(y))')
@@ -237,17 +237,17 @@
if text_drs_list:
text_ex = text_drs_list[0].simplify().toFol()
else:
- print 'ERROR: No readings were be generated for the Text'
+ print('ERROR: No readings were be generated for the Text')
hyp_drs_list = glueclass.parse_to_meaning(hyp)
if hyp_drs_list:
hyp_ex = hyp_drs_list[0].simplify().toFol()
else:
- print 'ERROR: No readings were be generated for the Hypothesis'
-
- print 'Text: ', text_ex
- print 'Hyp: ', hyp_ex
- print ''
+ print('ERROR: No readings were be generated for the Hypothesis')
+
+ print('Text: ', text_ex)
+ print('Hyp: ', hyp_ex)
+ print('')
#1. proof T -> H
#2. proof (BK & T) -> H
@@ -257,67 +257,67 @@
#6. satisfy BK & T & H
result = inference.Prover9().prove(hyp_ex, [text_ex])
- print 'prove: T -> H: %s' % result
- if result:
-        print 'Logical entailment\n'
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/rte/logicentail.py
- else:
- print 'No logical entailment\n'
+ print('prove: T -> H: %s' % result)
+ if result:
+ print('Logical entailment\n')
+ else:
+ print('No logical entailment\n')
bk = tagger._generate_BK(text, hyp, verbose)
bk_exs = [bk_pair[0] for bk_pair in bk]
- print 'Generated Background Knowledge:'
+ print('Generated Background Knowledge:')
for bk_ex in bk_exs:
- print bk_ex
- print ''
+ print(bk_ex)
+ print('')
result = inference.Prover9().prove(hyp_ex, [text_ex]+bk_exs)
- print 'prove: (T & BK) -> H: %s' % result
- if result:
- print 'Logical entailment\n'
- else:
- print 'No logical entailment\n'
+ print('prove: (T & BK) -> H: %s' % result)
+ if result:
+ print('Logical entailment\n')
+ else:
+ print('No logical entailment\n')
# Check if the background knowledge axioms are inconsistent
result = inference.Prover9().prove(assumptions=bk_exs+[text_ex]).prove()
- print 'prove: (BK & T): %s' % result
- if result:
- print 'Inconsistency -> Entailment unknown\n'
- else:
- print 'No inconsistency\n'
+ print('prove: (BK & T): %s' % result)
+ if result:
+ print('Inconsistency -> Entailment unknown\n')
+ else:
+ print('No inconsistency\n')
result = inference.Prover9().prove(assumptions=bk_exs+[text_ex, hyp_ex])
- print 'prove: (BK & T & H): %s' % result
- if result:
- print 'Inconsistency -> Entailment unknown\n'
- else:
- print 'No inconsistency\n'
+ print('prove: (BK & T & H): %s' % result)
+ if result:
+ print('Inconsistency -> Entailment unknown\n')
+ else:
+ print('No inconsistency\n')
result = inference.Mace().build_model(assumptions=bk_exs+[text_ex])
- print 'satisfy: (BK & T): %s' % result
- if result:
- print 'No inconsistency\n'
- else:
- print 'Inconsistency -> Entailment unknown\n'
+ print('satisfy: (BK & T): %s' % result)
+ if result:
+ print('No inconsistency\n')
+ else:
+ print('Inconsistency -> Entailment unknown\n')
result = inference.Mace().build_model(assumptions=bk_exs+[text_ex, hyp_ex]).build_model()
- print 'satisfy: (BK & T & H): %s' % result
- if result:
- print 'No inconsistency\n'
- else:
- print 'Inconsistency -> Entailment unknown\n'
+ print('satisfy: (BK & T & H): %s' % result)
+ if result:
+ print('No inconsistency\n')
+ else:
+ print('Inconsistency -> Entailment unknown\n')
def test_check_consistency():
a = LogicParser().parse('man(j)')
b = LogicParser().parse('-man(j)')
- print '%s, %s: %s' % (a, b, RTEInferenceTagger().check_consistency([a,b], True))
- print '%s, %s: %s' % (a, a, RTEInferenceTagger().check_consistency([a,a], True))
+ print('%s, %s: %s' % (a, b, RTEInferenceTagger().check_consistency([a,b], True)))
+ print('%s, %s: %s' % (a, a, RTEInferenceTagger().check_consistency([a,a], True)))
def tag(text, hyp):
- print 'Text: ', text
- print 'Hyp: ', hyp
- print 'Entailment =', RTEInferenceTagger().tag_sentences(text, hyp, True)
- print ''
+ print('Text: ', text)
+ print('Hyp: ', hyp)
+ print('Entailment =', RTEInferenceTagger().tag_sentences(text, hyp, True))
+ print('')
if __name__ == '__main__':
# test_check_consistency()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/rte/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No files need to be modified.
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/refexpr/util.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/refexpr/util.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/refexpr/util.py
--- ../python3/nltk_contrib/nltk_contrib/refexpr/util.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/refexpr/util.py (refactored)
@@ -97,7 +97,7 @@
# Put the highest priority attributes next to the noun
for attr in attr_prefs:
if (attrs.count(attr) > 0):
- if (handlers != None) and (handlers.has_key(attr)):
+ if (handlers != None) and (attr in handlers):
attr_queue.insert(0, handlers[attr](desc_dict[attr]))
else:
attr_queue.insert(0, desc_dict[attr])
@@ -138,13 +138,13 @@
# There is a difference between generating the phrases:
# "the box on the table" and "the table on which the box sits"
if cur_rel[2] == target_id:
- if (handlers != None) and (handlers.has_key(rel_desc)):
+ if (handlers != None) and (rel_desc in handlers):
rel_desc = handlers[rel_desc](True)
other_desc = generate_phrase_rel(other_attrs, attr_prefs, cur_rel[3], handlers, False)
clauses.append("%s %s %s" % (target_desc, rel_desc, other_desc))
else:
- if (handlers != None) and (handlers.has_key(rel_desc)):
+ if (handlers != None) and (rel_desc in handlers):
rel_desc = handlers[rel_desc](False)
other_desc = generate_phrase_rel(other_attrs, attr_prefs, cur_rel[2], handlers, False)
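NOTE: dict.has_key() was removed in Python 3, so the 'attr in handlers'
rewrite above is the correct idiom. The surviving '!= None' comparisons
would also read better as identity tests (sketch):

    if handlers is not None and attr in handlers:
        attr_queue.insert(0, handlers[attr](desc_dict[attr]))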
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/refexpr/relational.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/refexpr/relational.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/refexpr/relational.py
--- ../python3/nltk_contrib/nltk_contrib/refexpr/relational.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/refexpr/relational.py (refactored)
@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import constraint
+from . import constraint
from copy import copy, deepcopy
-from util import validate_facts, Type, Rel, generate_phrase_rel
+from .util import validate_facts, Type, Rel, generate_phrase_rel
class _RelationalVar:
"""Internal class used to represent relational variables"""
@@ -70,7 +70,7 @@
def __fact_replace(self, fact, to_replace, replace_with):
"""Replaces all occurrences of to_replace in fact with replace_with"""
- return fact[:2] + map(lambda fact_id: replace_with if (not isinstance(fact_id, _RelationalVar) and fact_id == to_replace) else fact_id, fact[2:])
+ return fact[:2] + [replace_with if (not isinstance(fact_id, _RelationalVar) and fact_id == to_replace) else fact_id for fact_id in fact[2:]]
def __get_context_set(self, constraints, obj_var):
"""Returns a set of objects that fit the given constraints for obj_var"""
@@ -183,11 +183,11 @@
rel = Relational(facts)
obj_types = [f for f in facts if f[0] == Type] # Include types in the description for clarity
handlers = {
- "on" : lambda(lr): "on" if lr else "on which lies",
- "in" : lambda(lr): "in" if lr else "in which lies"
+ "on" : lambda lr: "on" if lr else "on which lies",
+ "in" : lambda lr: "in" if lr else "in which lies"
}
# Generate an English description for each object
for obj_id in ["c1", "c2", "c3", "b1", "b2", "t1", "t2", "f1"]:
- print "%s: %s" % (obj_id, generate_phrase_rel(rel.describe(obj_id) + obj_types, ["color"], obj_id, handlers))
+ print("%s: %s" % (obj_id, generate_phrase_rel(rel.describe(obj_id) + obj_types, ["color"], obj_id, handlers)))
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/refexpr/incremental.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/refexpr/incremental.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/refexpr/incremental.py
--- ../python3/nltk_contrib/nltk_contrib/refexpr/incremental.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/refexpr/incremental.py (refactored)
@@ -1,7 +1,7 @@
import string
from copy import copy, deepcopy
-from util import validate_facts, Type, Rel, generate_phrase
+from .util import validate_facts, Type, Rel, generate_phrase
class Incremental:
"""
@@ -181,7 +181,7 @@
# Print English description for each object
for obj_id in ["obj1", "obj2", "obj3"]:
obj_type = [f for f in facts if f[0] == Type and f[2] == obj_id] # Include type for clarity
- print "%s: %s" % (obj_id, generate_phrase(incr.describe(obj_id) + obj_type, ["color", "size"]))
+ print("%s: %s" % (obj_id, generate_phrase(incr.describe(obj_id) + obj_type, ["color", "size"])))
class Taxonomy:
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/refexpr/gre3d_facts.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/refexpr/gre3d_facts.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/refexpr/gre3d_facts.py
--- ../python3/nltk_contrib/nltk_contrib/refexpr/gre3d_facts.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/refexpr/gre3d_facts.py (refactored)
@@ -1,8 +1,8 @@
-from full_brevity import *
-from incremental import *
-from relational import *
-
-import util
+from .full_brevity import *
+from .incremental import *
+from .relational import *
+
+from . import util
def getFacts():
"""
@@ -285,9 +285,9 @@
taxonomy = Taxonomy({})
handlers = {
- "in_front_of": lambda(lr): "in front of",
- "left_of": lambda(lr): "to the left of",
- "right_of": lambda(lr): "to the right of"
+ "in_front_of": lambda lr: "in front of",
+ "left_of": lambda lr: "to the left of",
+ "right_of": lambda lr: "to the right of"
}
#Print out the referring expressions generated by each algorithm for each scene
@@ -301,7 +301,7 @@
rel = Relational(facts[i])
desc_rel = rel.describe("r1")
- print "%#02d,\"Full Brevity\",\"%s\"" % (i, util.generate_phrase(desc_fb, ranked_attrs))
- print "%#02d,\"Incremental\",\"%s\"" % (i, util.generate_phrase(desc_incr, ranked_attrs))
- print "%#02d,\"Relational\",\"%s\"" % (i, util.generate_phrase_rel(desc_rel, ranked_attrs, "r1", handlers))
-
+ print("%#02d,\"Full Brevity\",\"%s\"" % (i, util.generate_phrase(desc_fb, ranked_attrs)))
+ print("%#02d,\"Incremental\",\"%s\"" % (i, util.generate_phrase(desc_incr, ranked_attrs)))
+ print("%#02d,\"Relational\",\"%s\"" % (i, util.generate_phrase_rel(desc_rel, ranked_attrs, "r1", handlers)))
+
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/refexpr/full_brevity.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/refexpr/full_brevity.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/refexpr/full_brevity.py
--- ../python3/nltk_contrib/nltk_contrib/refexpr/full_brevity.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/refexpr/full_brevity.py (refactored)
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from util import validate_facts, Type, Rel, generate_phrase
+from .util import validate_facts, Type, Rel, generate_phrase
class FullBrevity:
"""
@@ -30,7 +30,7 @@
"""
self.facts = facts
self.object_ids = validate_facts(self.facts)
- assert not any(map(lambda f: f == Rel, self.facts)), "Full Brevity does not support relationships"
+ assert not any([f == Rel for f in self.facts]), "Full Brevity does not support relationships"
def describe(self, target_id):
"""
@@ -55,7 +55,7 @@
best_prop = None
# Find the property that best constrains the distractors set
- for prop_key in properties.keys():
+ for prop_key in list(properties.keys()):
prop_val = properties[prop_key]
dist_set = [dist for dist in distractors if dist[prop_key][1] == prop_val[1]]
if (best_set is None) or (len(dist_set) < len(best_set)):
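NOTE: the list() wrapper that 2to3 adds guards against mutating the dict
while iterating; this loop only reads properties, so it could iterate the
items directly (sketch):

    for prop_key, prop_val in properties.items():
        dist_set = [dist for dist in distractors
                    if dist[prop_key][1] == prop_val[1]]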
@@ -81,5 +81,5 @@
# Print English description for each object
for obj_id in ["obj1", "obj2", "obj3"]:
obj_type = [f for f in facts if f[0] == Type and f[2] == obj_id] # Include type for clarity
- print "%s: %s" % (obj_id, generate_phrase(fb.describe(obj_id) + obj_type, ["color", "size"]))
+ print("%s: %s" % (obj_id, generate_phrase(fb.describe(obj_id) + obj_type, ["color", "size"])))
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/refexpr/drawers.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/refexpr/drawers.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/refexpr/drawers.py
--- ../python3/nltk_contrib/nltk_contrib/refexpr/drawers.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/refexpr/drawers.py (refactored)
@@ -1,8 +1,8 @@
from random import shuffle
-from full_brevity import *
-from relational import *
-from incremental import *
-from util import generate_phrase, generate_phrase_rel
+from .full_brevity import *
+from .relational import *
+from .incremental import *
+from .util import generate_phrase, generate_phrase_rel
if __name__ == '__main__':
# This data is based on the drawer pictures from Vienthen and Dale (2006)
@@ -268,7 +268,7 @@
shuffle(facts, lambda: 0.0)
- fb = FullBrevity(filter(lambda f: f[0] != Rel, facts))
+ fb = FullBrevity([f for f in facts if f[0] != Rel])
rel = Relational(facts)
#The ordered priority for using attributes, important for incremental algorithm
ranked_attrs = ["color", "row", "col", "corner"]
@@ -279,19 +279,19 @@
#defines how to turn these rules into English phrases
handlers = {
- "col": lambda(desc): "column %s" % desc,
- "row": lambda(desc): "row %s" % desc,
- "corner": lambda(desc): "corner",
- "above": lambda(lr): "above" if lr else "below",
- "below": lambda(lr): "below" if lr else "above",
- "right": lambda(lr): "to the right of" if lr else "to the left of",
- "left": lambda(lr): "to the left of" if lr else "to the right of"
+ "col": lambda desc: "column %s" % desc,
+ "row": lambda desc: "row %s" % desc,
+ "corner": lambda desc: "corner",
+ "above": lambda lr: "above" if lr else "below",
+ "below": lambda lr: "below" if lr else "above",
+ "right": lambda lr: "to the right of" if lr else "to the left of",
+ "left": lambda lr: "to the left of" if lr else "to the right of"
}
#Generate phrases with each algorithm and print to screen
for i in range(1, 17):
obj_id = "d%s" % i
- print "%#02d,\"Full Brevity\",\"%s\"" % (i, generate_phrase(fb.describe(obj_id), ranked_attrs, handlers))
- print "%#02d,\"Relational\",\"%s\"" % (i, generate_phrase_rel(rel.describe(obj_id), ranked_attrs, obj_id, handlers))
- print "%#02d,\"Incremental\",\"%s\"" % (i, generate_phrase(incr.describe(obj_id), ranked_attrs, handlers))
-
+ print("%#02d,\"Full Brevity\",\"%s\"" % (i, generate_phrase(fb.describe(obj_id), ranked_attrs, handlers)))
+ print("%#02d,\"Relational\",\"%s\"" % (i, generate_phrase_rel(rel.describe(obj_id), ranked_attrs, obj_id, handlers)))
+ print("%#02d,\"Incremental\",\"%s\"" % (i, generate_phrase(incr.describe(obj_id), ranked_attrs, handlers)))
+
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/refexpr/constraint.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/refexpr/constraint.py
--- ../python3/nltk_contrib/nltk_contrib/refexpr/constraint.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/refexpr/constraint.py (refactored)
@@ -37,6 +37,7 @@
"""
import random
import copy
+import collections
__all__ = ["Problem", "Variable", "Domain", "Unassigned",
"Solver", "BacktrackingSolver", "RecursiveBacktrackingSolver",
@@ -126,17 +127,17 @@
@type domain: list, tuple, or instance of C{Domain}
"""
if variable in self._variables:
- raise ValueError, "Tried to insert duplicated variable %s" % \
- repr(variable)
+ raise ValueError("Tried to insert duplicated variable %s" % \
+ repr(variable))
if type(domain) in (list, tuple):
domain = Domain(domain)
elif isinstance(domain, Domain):
domain = copy.copy(domain)
else:
- raise TypeError, "Domains must be instances of subclasses of "\
- "the Domain class"
+ raise TypeError("Domains must be instances of subclasses of "\
+ "the Domain class")
if not domain:
- raise ValueError, "Domain is empty"
+ raise ValueError("Domain is empty")
self._variables[variable] = domain
def addVariables(self, variables, domain):
@@ -184,11 +185,11 @@
@type variables: set or sequence of variables
"""
if not isinstance(constraint, Constraint):
- if callable(constraint):
+ if isinstance(constraint, collections.Callable):
constraint = FunctionConstraint(constraint)
else:
- raise ValueError, "Constraints must be instances of "\
- "subclasses of the Constraint class"
+ raise ValueError("Constraints must be instances of "\
+ "subclasses of the Constraint class")
self._constraints.append((constraint, variables))
def getSolution(self):
@@ -259,7 +260,7 @@
def _getArgs(self):
domains = self._variables.copy()
- allvariables = domains.keys()
+ allvariables = list(domains.keys())
constraints = []
for constraint, variables in self._constraints:
if not variables:
@@ -274,7 +275,7 @@
for constraint, variables in constraints[:]:
constraint.preProcess(variables, domains,
constraints, vconstraints)
- for domain in domains.values():
+ for domain in list(domains.values()):
domain.resetState()
if not domain:
return None, None, None
@@ -368,8 +369,7 @@
constraints affecting the given variables.
@type vconstraints: dict
"""
- raise NotImplementedError, \
- "%s is an abstract class" % self.__class__.__name__
+ raise NotImplementedError("%s is an abstract class" % self.__class__.__name__)
def getSolutions(self, domains, constraints, vconstraints):
"""
@@ -383,8 +383,7 @@
constraints affecting the given variables.
@type vconstraints: dict
"""
- raise NotImplementedError, \
- "%s provides only a single solution" % self.__class__.__name__
+ raise NotImplementedError("%s provides only a single solution" % self.__class__.__name__)
def getSolutionIter(self, domains, constraints, vconstraints):
"""
@@ -398,8 +397,7 @@
constraints affecting the given variables.
@type vconstraints: dict
"""
- raise NotImplementedError, \
- "%s doesn't provide iteration" % self.__class__.__name__
+ raise NotImplementedError("%s doesn't provide iteration" % self.__class__.__name__)
class BacktrackingSolver(Solver):
"""
@@ -514,12 +512,12 @@
# Push state before looking for next variable.
queue.append((variable, values, pushdomains))
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/refexpr/constraint.py
- raise RuntimeError, "Can't happen"
+ raise RuntimeError("Can't happen")
def getSolution(self, domains, constraints, vconstraints):
iter = self.getSolutionIter(domains, constraints, vconstraints)
try:
- return iter.next()
+ return next(iter)
except StopIteration:
return None
@@ -665,9 +663,9 @@
# Initial assignment
for variable in domains:
assignments[variable] = random.choice(domains[variable])
- for _ in xrange(self._steps):
+ for _ in range(self._steps):
conflicted = False
- lst = domains.keys()
+ lst = list(domains.keys())
random.shuffle(lst)
for variable in lst:
# Check if variable is not in conflict
@@ -986,7 +984,7 @@
def __call__(self, variables, domains, assignments, forwardcheck=False,
_unassigned=Unassigned):
singlevalue = _unassigned
- for value in assignments.values():
+ for value in list(assignments.values()):
if singlevalue is _unassigned:
singlevalue = value
elif value != singlevalue:
@@ -1242,7 +1240,7 @@
def __call__(self, variables, domains, assignments, forwardcheck=False):
# preProcess() will remove it.
- raise RuntimeError, "Can't happen"
+ raise RuntimeError("Can't happen")
def preProcess(self, variables, domains, constraints, vconstraints):
set = self._set
@@ -1277,7 +1275,7 @@
def __call__(self, variables, domains, assignments, forwardcheck=False):
# preProcess() will remove it.
- raise RuntimeError, "Can't happen"
+ raise RuntimeError("Can't happen")
def preProcess(self, variables, domains, constraints, vconstraints):
set = self._set
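Note: the isinstance(constraint, collections.Callable) rewrite above is 2to3's callable fixer, written for Python 3.0/3.1 where the callable() builtin had been removed. The builtin returned in Python 3.2, and the ABCs moved to collections.abc in 3.3 (the bare collections.Callable alias is gone as of Python 3.10), so on a current interpreter the generated line needs one more touch-up. A sketch of the portable spellings:

    import collections.abc

    def is_callable(obj):
        # 2to3 emitted isinstance(obj, collections.Callable); that alias was
        # removed in Python 3.10.  Portable spelling for Python 3.3+:
        return isinstance(obj, collections.abc.Callable)
        # or, simplest on Python 3.2+:  return callable(obj)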
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/refexpr/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No files need to be modified.
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/referring.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/referring.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/referring.py
--- ../python3/nltk_contrib/nltk_contrib/referring.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/referring.py (refactored)
@@ -221,23 +221,23 @@
Object2 = {"type":"chihuahua", "size":"large", "colour":"white"}
Object3 = {"type":"siamese-cat", "size":"small", "colour":"black"}
- print "Given an entity defined as: "
+ print("Given an entity defined as: ")
r = Object1
- print r
+ print(r)
preferred_attrs = ["type", "colour", "size"]
- print "In a set defined as: "
+ print("In a set defined as: ")
contrast_set = [Object2, Object3]
- print contrast_set
+ print(contrast_set)
RE = IncrementalAlgorithm(KB, r, contrast_set, preferred_attrs).RE
- print "The referring expression created to uniquely identify",
- print "the referent is: "
- print RE
+ print("The referring expression created to uniquely identify", end=' ')
+ print("the referent is: ")
+ print(RE)
RE_string = ""
for attr, val in RE:
RE_string = val + " " + RE_string
RE_string = "The " + RE_string
- print "This can be surface-realized as:"
- print RE_string
+ print("This can be surface-realized as:")
+ print(RE_string)
if __name__ == "__main__":
demo()
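Note: the end=' ' argument above is how 2to3 translates a Python 2 print statement with a trailing comma, which suppressed the newline so the next print continued on the same line. A two-line sketch of the converted idiom:

    print("first half,", end=' ')   # no newline; a single space instead
    print("second half")            # output: first half, second half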
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/readability/urlextracter.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/readability/urlextracter.py
WARNING: couldn't encode ../python3/nltk_contrib/nltk_contrib/readability/urlextracter.py's diff for your terminal
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/readability/urlextracter.py
--- ../python3/nltk_contrib/nltk_contrib/readability/urlextracter.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/readability/urlextracter.py (refactored)
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/readability/textanalyzer.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/readability/textanalyzer.py
WARNING: couldn't encode ../python3/nltk_contrib/nltk_contrib/readability/textanalyzer.py's diff for your terminal
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/readability/textanalyzer.py
--- ../python3/nltk_contrib/nltk_contrib/readability/textanalyzer.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/readability/textanalyzer.py (refactored)
@@ -3,9 +3,9 @@
import nltk.data
from nltk.tokenize import *
-import syllables_en
-import syllables_no
-from languageclassifier import *
+from . import syllables_en
+from . import syllables_no
+from .languageclassifier import *
import logging
class textanalyzer(object):
@@ -28,13 +28,13 @@
syllablesCount = self.countSyllables(words)
complexwordsCount = self.countComplexWords(text)
averageWordsPerSentence = wordCount/sentenceCount
- print ' Language: ' + self.lang
- print ' Number of characters: ' + str(charCount)
- print ' Number of words: ' + str(wordCount)
- print ' Number of sentences: ' + str(sentenceCount)
- print ' Number of syllables: ' + str(syllablesCount)
- print ' Number of complex words: ' + str(complexwordsCount)
- print ' Average words per sentence: ' + str(averageWordsPerSentence)
+ print(' Language: ' + self.lang)
+ print(' Number of characters: ' + str(charCount))
+ print(' Number of words: ' + str(wordCount))
+ print(' Number of sentences: ' + str(sentenceCount))
+ print(' Number of syllables: ' + str(syllablesCount))
+ print(' Number of complex words: ' + str(complexwordsCount))
+ print(' Average words per sentence: ' + str(averageWordsPerSentence))
#analyzeText = classmethod(analyzeText)
@@ -127,12 +127,12 @@
def _setEncoding(self,text):
try:
- text = unicode(text, "utf8").encode("utf8")
+ text = str(text, "utf8").encode("utf8")
except UnicodeError:
try:
- text = unicode(text, "iso8859_1").encode("utf8")
+ text = str(text, "iso8859_1").encode("utf8")
except UnicodeError:
- text = unicode(text, "ascii", "replace").encode("utf8")
+ text = str(text, "ascii", "replace").encode("utf8")
return text
#_setEncoding = classmethod(_setEncoding)
@@ -153,9 +153,9 @@
# \nthe people, for the people, shall not perish from this earth."
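Note: the unicode(...) to str(...) renames above are purely mechanical. On Python 3, str(text, "utf8") only accepts bytes (a str argument raises TypeError), and the trailing .encode("utf8") turns the result back into bytes, so the converted _setEncoding returns bytes where Python 2 callers effectively got text. A sketch of the idiomatic Python 3 version, keeping the same fallback order but returning str:

    def set_encoding(raw: bytes) -> str:
        # decode once with fallbacks and keep the result as text; note that
        # latin-1 never fails, mirroring the original's fallback order
        for codec in ("utf-8", "iso8859_1"):
            try:
                return raw.decode(codec)
            except UnicodeError:
                pass
        return raw.decode("ascii", "replace")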
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/readability/syllables_no.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/readability/syllables_no.py
WARNING: couldn't encode ../python3/nltk_contrib/nltk_contrib/readability/syllables_no.py's diff for your terminal
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/readability/syllables_no.py
--- ../python3/nltk_contrib/nltk_contrib/readability/syllables_no.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/readability/syllables_no.py (refactored)
@@ -78,10 +78,10 @@
if line:
toks = line.split()
assert len(toks) == 2
- syllablesInFile[_stripWord(unicode(toks[0],"latin-1").encode("utf-8"))] = int(toks[1])
+ syllablesInFile[_stripWord(str(toks[0],"latin-1").encode("utf-8"))] = int(toks[1])
def count(word):
- word = unicode(word,"utf-8").encode("utf-8")
+ word = str(word,"utf-8").encode("utf-8")
word = _stripWord(word)
if not word:
@@ -96,7 +96,7 @@
# Count vowel groups
count = 0
prev_was_vowel = 0
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/readability/syllables_en.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/readability/syllables_en.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/readability/syllables_en.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/readability/readabilitytests.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/readability/readabilitytests.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/readability/readabilitytests.py
--- ../python3/nltk_contrib/nltk_contrib/readability/readabilitytests.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/readability/readabilitytests.py (refactored)
@@ -1,4 +1,4 @@
-from textanalyzer import *
+from .textanalyzer import *
import math
class ReadabilityTool:
@@ -196,17 +196,17 @@
# print ' RIX : %.1f' % rix
# print '*' * 70
- print "=" * 100
- print "Recommended tests for lang: %s" % self.lang
- print "=" * 100
- for testname in self.tests_given_lang[self.lang].keys():
- print testname + " : %.2f" % self.tests_given_lang[self.lang][testname](text)
- print "=" * 100
- print "Other tests: (Warning! Use with care)"
- print "=" * 100
- for testname in self.tests_given_lang["all"].keys():
- if not self.tests_given_lang[self.lang].has_key(testname):
- print testname + " : %.2f" % self.tests_given_lang["all"][testname](text)
+ print("=" * 100)
+ print("Recommended tests for lang: %s" % self.lang)
+ print("=" * 100)
+ for testname in list(self.tests_given_lang[self.lang].keys()):
+ print(testname + " : %.2f" % self.tests_given_lang[self.lang][testname](text))
+ print("=" * 100)
+ print("Other tests: (Warning! Use with care)")
+ print("=" * 100)
+ for testname in list(self.tests_given_lang["all"].keys()):
+ if testname not in self.tests_given_lang[self.lang]:
+ print(testname + " : %.2f" % self.tests_given_lang["all"][testname](text))
def demo(self):
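Note: 2to3 wraps .keys() in list(...) wherever it cannot prove the result is only iterated, since Python 3 returns a live view that cannot be indexed and must not be iterated while the dict changes size. Nothing here mutates the dicts, so the copies are safe but unnecessary; has_key() is gone for good and becomes the in operator. A sketch of the leaner idioms, with a stand-in for tests_given_lang:

    tests = {"flesch": lambda text: 0.0}    # stand-in score table
    for name in tests:                      # iterate keys directly, no list()
        print(name + " : %.2f" % tests[name]("some text"))
    if "flesch" in tests:                   # membership test, was has_key()
        pass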
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/readability/languageclassifier.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/readability/languageclassifier.py
--- ../python3/nltk_contrib/nltk_contrib/readability/languageclassifier.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/readability/languageclassifier.py (refactored)
@@ -11,7 +11,7 @@
from nltk.corpus import stopwords
-from urlextracter import URLextracter
+from .urlextracter import URLextracter
from sgmllib import *
class NaiveBayes(object):
@@ -72,12 +72,12 @@
values = file.split('/')
lang = values[-2]
- if not self.p_lang.has_key(lang):
+ if lang not in self.p_lang:
self.p_lang[lang] = 0.0
self.p_lang[lang] += 1.0
- if not self.files.has_key(lang):
+ if lang not in self.files:
self.files[lang] = []
f = open(file, 'r')
@@ -85,35 +85,35 @@
f.close()
# Calculate probabilities
- for lang in self.p_lang.keys():
+ for lang in list(self.p_lang.keys()):
self.p_lang[lang] /= len(self.training_files)
self.vocabulary = self.__createVocabulary(self.files)
# Calculate P(O | H)
p_word_given_lang = self.p_word_given_lang
- for lang in self.files.keys():
+ for lang in list(self.files.keys()):
p_word_given_lang[lang] = {}
- for word in self.vocabulary[lang].keys():
+ for word in list(self.vocabulary[lang].keys()):
p_word_given_lang[lang][word] = 1.0
for word in self.files[lang]:
- if self.vocabulary[lang].has_key(word):
+ if word in self.vocabulary[lang]:
p_word_given_lang[lang][word] += 1.0
- for word in self.vocabulary[lang].keys():
+ for word in list(self.vocabulary[lang].keys()):
p_word_given_lang[lang][word] /= len(self.files[lang]) + len(self.vocabulary[lang])
- print "Training finished...(training-set of size %d)" % len(self.training_files)
+ print("Training finished...(training-set of size %d)" % len(self.training_files))
self.p_word_given_lang = p_word_given_lang
- self.candidate_languages = self.files.keys()
+ self.candidate_languages = list(self.files.keys())
# Save result as a file
output = open(os.path.join("files","lang_data.pickle"),'wb')
data = {}
data["p_word_given_lang"] = p_word_given_lang
- data["canidate_languages"] = self.files.keys()
+ data["canidate_languages"] = list(self.files.keys())
data["p_lang"] = self.p_lang
data["vocabulary"] = self.vocabulary
pickler = pickle.dump(data, output, -1)
@@ -128,16 +128,16 @@
"""
# Count number of occurance of each word
word_count = {}
- for lang in files.keys():
+ for lang in list(files.keys()):
for word in files[lang]:
- if not word_count.has_key(word):
+ if word not in word_count:
word_count[word] = 0
word_count[word] += 1
vocabulary = {}
vocabulary['eng'] = {}
vocabulary['no'] = {}
- for word in word_count.keys():
+ for word in list(word_count.keys()):
if word_count[word] > 2:
if word != '':
if not word in self.nor_stopwords:
@@ -155,7 +155,7 @@
"""
if test_files == "":
- print "No test files given"
+ print("No test files given")
return
elif os.path.isdir(str(test_files)):
self.test_files = glob.glob(test_files + "/*/*")
@@ -186,7 +186,7 @@
# Calculates P(O | H) * P(H) for candidate group
p = math.log(self.p_lang[candidate_lang])
for word in file_to_be_classified:
- if self.vocabulary[candidate_lang].has_key(word):
+ if word in self.vocabulary[candidate_lang]:
WARNING: couldn't encode ../python3/nltk_contrib/nltk_contrib/readability/languageclassifier.py's diff for your terminal
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/readability/languageclassifier.py
p += math.log(self.p_word_given_lang[candidate_lang][word])
if p > max_p or max_p == 1:
@@ -196,10 +196,10 @@
total += 1.0
if true_lang != max_lang:
errors += 1.0
- print "Classifying finished...(test-set of size %d)" % len(self.test_files)
- print "Errors %d" % errors
- print "Total %d" % total
- print "Accuracy: %.3f" % (1.0 - errors/total)
+ print("Classifying finished...(test-set of size %d)" % len(self.test_files))
+ print("Errors %d" % errors)
+ print("Total %d" % total)
+ print("Accuracy: %.3f" % (1.0 - errors/total))
def classifyText(self, text):
"""
@@ -219,7 +219,7 @@
unknown_words = []
known_words = []
for word in words:
- if self.vocabulary[candidate_lang].has_key(word):
+ if word in self.vocabulary[candidate_lang]:
p += math.log(self.p_word_given_lang[candidate_lang][word])
if word not in known_words:
known_words.append(word)
@@ -241,7 +241,7 @@
def classifyURL(self, url):
ue = URLextracter(url)
- print 'Classifying %s' % url
+ print('Classifying %s' % url)
content = ue.output()
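Note: one thing 2to3 leaves alone in this file (and in crawler.py below) is 'from sgmllib import *'. sgmllib was deprecated in Python 2.6 and removed in Python 3.0, and there is no fixer for it, so the refactored module raises ImportError as soon as it is imported; html.parser is the usual substitute. A minimal sketch, assuming only tag/text extraction is needed (a guess at what URLextracter uses sgmllib for):

    from html.parser import HTMLParser   # Python 3 replacement for sgmllib

    class TextExtractor(HTMLParser):
        """Collect the text content of an HTML page."""
        def __init__(self):
            super().__init__()
            self.chunks = []

        def handle_data(self, data):
            self.chunks.append(data)

        def text(self):
            return "".join(self.chunks)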
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/readability/crawler.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/readability/crawler.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/readability/crawler.py
--- ../python3/nltk_contrib/nltk_contrib/readability/crawler.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/readability/crawler.py (refactored)
@@ -3,7 +3,7 @@
import random
import os,re
-from urlextracter import *
+from .urlextracter import *
from sgmllib import *
class Crawler:
@@ -13,13 +13,13 @@
def crawl(self,url):
self.current = url
- print "Crawling " + url
+ print("Crawling " + url)
try:
ue = URLextracter(url)
except SGMLParseError:
- print "This URL contains error that can't be handled by this app.\nSorry!"
- print "=" * 30
- print "Trying new random URL"
+ print("This URL contains error that can't be handled by this app.\nSorry!")
+ print("=" * 30)
+ print("Trying new random URL")
self.crawl(self.urls[random.randint(1,len(self.urls))])
return
@@ -30,7 +30,7 @@
filename += part + "."
filename += "txt"
- print "Stored as: " + filename
+ print("Stored as: " + filename)
urls = ""
try:
# Set the path of where to store your data
@@ -41,7 +41,7 @@
if len(content) > 2: # Minimum 3 words
try:
- textToWrite = unicode("".join(content))
+ textToWrite = str("".join(content))
except UnicodeDecodeError:
textToWrite = str("".join(content))
f.write(textToWrite)
@@ -50,9 +50,9 @@
# Set this path to same as storage path
os.remove("/path/to/saved/data/lang/%s" % filename)
urls = ue.linklist
- print "" + url + " mined!"
+ print("" + url + " mined!")
except IOError:
- print "Mined, but failed to store as file.\nSkipping this, going on to next!"
+ print("Mined, but failed to store as file.\nSkipping this, going on to next!")
urls = self.urls
ok_urls = []
for i in urls:
@@ -68,12 +68,12 @@
if len(ok_urls) < 2:
ok_urls = self.crawled
unique = True # Fake true
- print str(len(ok_urls))
+ print(str(len(ok_urls)))
else:
unique = False
next = random.randint(1,len(ok_urls)-1)
- print next
+ print(next)
new_url = ok_urls[next]
while not unique:
next = random.randint(1,len(ok_urls)-1)
@@ -86,7 +86,7 @@
new_url = ok_urls[next]
unique = True
else:
- print "Already crawled " + new_url
+ print("Already crawled " + new_url)
ok_urls.remove(new_url)
if len(ok_urls) < 2:
ok_urls = self.crawled
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/readability/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/readability/__init__.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/readability/__init__.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/rdf/rdfvizualize.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/rdf/rdfvizualize.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/rdf/rdfvizualize.py
--- ../python3/nltk_contrib/nltk_contrib/rdf/rdfvizualize.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/rdf/rdfvizualize.py (refactored)
@@ -66,7 +66,7 @@
# add subjects and objects as nodes in the Dot instance
for s, o in self.graph.subject_objects():
for uri in s, o:
- if uri not in nodes.keys():
+ if uri not in list(nodes.keys()):
# generate a new node identifier
node_id = "n%03d" % count
nodes[uri] = node_id
@@ -121,9 +121,9 @@
try:
store = ConjunctiveGraph()
store.parse(FILE, format='xml')
- print store.serialize(format='xml')
+ print(store.serialize(format='xml'))
except OSError:
- print "Cannot read file '%s'" % FILE
+ print("Cannot read file '%s'" % FILE)
def make_dot_demo(infile):
try:
@@ -133,13 +133,13 @@
v = Visualizer(store)
g = v.graph2dot(filter_edges=True)
g.write('%s.dot' % basename)
- print "Wrote '%s.dot'" % basename
+ print("Wrote '%s.dot'" % basename)
g.write_png('%s.png' % basename, prog='dot')
- print "Wrote '%s.png'" % basename
+ print("Wrote '%s.png'" % basename)
g.write_svg('%s.svg' % basename, prog='dot')
- print "Wrote '%s.svg'" % basename
+ print("Wrote '%s.svg'" % basename)
except OSError:
- print "Cannot read file '%s'" % FILE
+ print("Cannot read file '%s'" % FILE)
def main():
@@ -169,9 +169,9 @@
#print '*' * 30
#serialize_demo()
- print
- print "Visualise an rdf graph with Graphviz"
- print '*' * 30
+ print()
+ print("Visualise an rdf graph with Graphviz")
+ print('*' * 30)
make_dot_demo(infile)
if __name__ == '__main__':
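Note: 'uri not in list(nodes.keys())' above is another conservative 2to3 rewrite; for a membership test the list(...) copy is pure overhead (it rebuilds the key list for every subject/object pair), and plain 'uri not in nodes' is equivalent and O(1). Sketch:

    nodes, count = {}, 0                 # uri -> node identifier
    uri = "http://example.org/a"         # hypothetical URI
    if uri not in nodes:                 # O(1) hash lookup, no copy
        nodes[uri] = "n%03d" % count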
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/rdf/rdfquery.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/rdf/rdfquery.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/rdf/rdfquery.py
--- ../python3/nltk_contrib/nltk_contrib/rdf/rdfquery.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/rdf/rdfquery.py (refactored)
@@ -86,7 +86,7 @@
semrep = sem.root_semrep(tree)
trans = SPARQLTranslator()
trans.translate(semrep)
- print trans.query
+ print(trans.query)
if __name__ == '__main__':
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/rdf/rdf.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/rdf/rdf.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/rdf/rdf.py
--- ../python3/nltk_contrib/nltk_contrib/rdf/rdf.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/rdf/rdf.py (refactored)
@@ -27,7 +27,7 @@
object = sym2uri(ns, reldict['objclass'], reldict['objsym'])
triple = (subject, predicate, object)
if verbose:
- print triple
+ print(triple)
return triple
def make_rdfs(ns, reldict):
@@ -47,7 +47,7 @@
"""
Build a URI out of a base, a class term, and a symbol.
"""
- from urllib import quote
+ from urllib.parse import quote
from rdflib import Namespace
rdfclass = class_abbrev(rdfclass)
rdfclass = rdfclass.lower()
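Note: the import rewrite above reflects Python 3's split of urllib into urllib.request, urllib.parse, and urllib.error; quote now lives in urllib.parse, which is all sym2uri needs here. A one-liner sketch with an arbitrary example string:

    from urllib.parse import quote           # Python 2: from urllib import quote
    print(quote("organization/ACME Corp"))   # -> organization/ACME%20Corp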
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/tagging/train.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/mit/six863/tagging/train.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/tagging/train.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/tagging/tagparse.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/tagging/tagparse.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/tagging/tagparse.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/tagging/tagparse.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/tagging/tagparse.py (refactored)
@@ -1,6 +1,6 @@
from nltk.parse import chart
from nltk import cfg
-from drawchart import ChartDemo
+from .drawchart import ChartDemo
from nltk.tokenize.regexp import wordpunct
#from nltk_contrib.mit.six863.kimmo import *
import re, pickle
@@ -27,7 +27,7 @@
match = re.match(r"PREFIX\('.*?'\)(.*?)\(.*", feat)
if match: pos = match.groups()[0]
else: pos = feat.split('(')[0]
- print surface, pos
+ print(surface, pos)
leafedge = chart.LeafEdge(word, i)
thechart.insert(chart.TreeEdge((i, i+1),
cfg.Nonterminal(pos), [word], dot=1), [leafedge])
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/tagging/drawchart.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/tagging/drawchart.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/tagging/drawchart.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/tagging/drawchart.py (refactored)
@@ -39,8 +39,8 @@
# widget system.
import pickle
-from tkFileDialog import asksaveasfilename, askopenfilename
-import Tkinter, tkFont, tkMessageBox
+from tkinter.filedialog import asksaveasfilename, askopenfilename
+import tkinter, tkinter.font, tkinter.messagebox
import math
import os.path
@@ -103,12 +103,12 @@
self._selected_cell = None
if toplevel:
- self._root = Tkinter.Toplevel(parent)
+ self._root = tkinter.Toplevel(parent)
self._root.title(title)
self._root.bind('', self.destroy)
self._init_quit(self._root)
else:
- self._root = Tkinter.Frame(parent)
+ self._root = tkinter.Frame(parent)
self._init_matrix(self._root)
self._init_list(self._root)
@@ -124,18 +124,18 @@
self.draw()
def _init_quit(self, root):
- quit = Tkinter.Button(root, text='Quit', command=self.destroy)
+ quit = tkinter.Button(root, text='Quit', command=self.destroy)
quit.pack(side='bottom', expand=0, fill='none')
def _init_matrix(self, root):
- cframe = Tkinter.Frame(root, border=2, relief='sunken')
+ cframe = tkinter.Frame(root, border=2, relief='sunken')
cframe.pack(expand=0, fill='none', padx=1, pady=3, side='top')
- self._canvas = Tkinter.Canvas(cframe, width=200, height=200,
+ self._canvas = tkinter.Canvas(cframe, width=200, height=200,
background='white')
self._canvas.pack(expand=0, fill='none')
def _init_numedges(self, root):
- self._numedges_label = Tkinter.Label(root, text='0 edges')
+ self._numedges_label = tkinter.Label(root, text='0 edges')
self._numedges_label.pack(expand=0, fill='none', side='top')
def _init_list(self, root):
@@ -212,8 +212,8 @@
except: pass
def _fire_callbacks(self, event, *args):
- if not self._callbacks.has_key(event): return
- for cb_func in self._callbacks[event].keys(): cb_func(*args)
+ if event not in self._callbacks: return
+ for cb_func in list(self._callbacks[event].keys()): cb_func(*args)
def select_cell(self, i, j):
if self._root is None: return
@@ -274,9 +274,9 @@
# Labels and dotted lines
for i in range(N):
c.create_text(LEFT_MARGIN-2, i*dy+dy/2+TOP_MARGIN,
- text=`i`, anchor='e')
+ text=repr(i), anchor='e')
c.create_text(i*dx+dx/2+LEFT_MARGIN, N*dy+TOP_MARGIN+1,
- text=`i`, anchor='n')
+ text=repr(i), anchor='n')
c.create_line(LEFT_MARGIN, dy*(i+1)+TOP_MARGIN,
dx*N+LEFT_MARGIN, dy*(i+1)+TOP_MARGIN, dash='.')
c.create_line(dx*i+LEFT_MARGIN, TOP_MARGIN,
@@ -327,21 +327,21 @@
self._selectbox = None
if toplevel:
- self._root = Tkinter.Toplevel(parent)
+ self._root = tkinter.Toplevel(parent)
self._root.title('Chart Parsing Demo: Results')
self._root.bind('', self.destroy)
else:
- self._root = Tkinter.Frame(parent)
+ self._root = tkinter.Frame(parent)
# Buttons
if toplevel:
- buttons = Tkinter.Frame(self._root)
+ buttons = tkinter.Frame(self._root)
buttons.pack(side='bottom', expand=0, fill='x')
- Tkinter.Button(buttons, text='Quit',
+ tkinter.Button(buttons, text='Quit',
command=self.destroy).pack(side='right')
- Tkinter.Button(buttons, text='Print All',
+ tkinter.Button(buttons, text='Print All',
command=self.print_all).pack(side='left')
- Tkinter.Button(buttons, text='Print Selection',
+ tkinter.Button(buttons, text='Print Selection',
command=self.print_selection).pack(side='left')
# Canvas frame.
@@ -404,7 +404,7 @@
def print_selection(self, *e):
if self._root is None: return
if self._selection is None:
- tkMessageBox.showerror('Print Error', 'No tree selected')
+ tkinter.messagebox.showerror('Print Error', 'No tree selected')
else:
c = self._cframe.canvas()
for widget in self._treewidgets:
@@ -509,7 +509,7 @@
self._operator = None
# Set up the root window.
- self._root = Tkinter.Tk()
+ self._root = tkinter.Tk()
self._root.title('Chart Comparison')
self._root.bind('', self.destroy)
self._root.bind('', self.destroy)
@@ -540,10 +540,10 @@
#////////////////////////////////////////////////////////////
def _init_menubar(self, root):
- menubar = Tkinter.Menu(root)
+ menubar = tkinter.Menu(root)
# File menu
- filemenu = Tkinter.Menu(menubar, tearoff=0)
+ filemenu = tkinter.Menu(menubar, tearoff=0)
filemenu.add_command(label='Load Chart', accelerator='Ctrl-o',
underline=0, command=self.load_chart_dialog)
filemenu.add_command(label='Save Output', accelerator='Ctrl-s',
@@ -554,7 +554,7 @@
menubar.add_cascade(label='File', underline=0, menu=filemenu)
# Compare menu
- opmenu = Tkinter.Menu(menubar, tearoff=0)
+ opmenu = tkinter.Menu(menubar, tearoff=0)
opmenu.add_command(label='Intersection',
command=self._intersection,
accelerator='+')
@@ -573,21 +573,21 @@
self._root.config(menu=menubar)
def _init_divider(self, root):
- divider = Tkinter.Frame(root, border=2, relief='sunken')
+ divider = tkinter.Frame(root, border=2, relief='sunken')
divider.pack(side='top', fill='x', ipady=2)
def _init_chartviews(self, root):
opfont=('symbol', -36) # Font for operator.
eqfont=('helvetica', -36) # Font for equals sign.
- frame = Tkinter.Frame(root, background='#c0c0c0')
+ frame = tkinter.Frame(root, background='#c0c0c0')
frame.pack(side='top', expand=1, fill='both')
# The left matrix.
- cv1_frame = Tkinter.Frame(frame, border=3, relief='groove')
+ cv1_frame = tkinter.Frame(frame, border=3, relief='groove')
cv1_frame.pack(side='left', padx=8, pady=7, expand=1, fill='both')
self._left_selector = MutableOptionMenu(
- cv1_frame, self._charts.keys(), command=self._select_left)
+ cv1_frame, list(self._charts.keys()), command=self._select_left)
self._left_selector.pack(side='top', pady=5, fill='x')
self._left_matrix = ChartMatrixView(cv1_frame, self._emptychart,
toplevel=False,
@@ -599,15 +599,15 @@
self._left_matrix.inactivate()
# The operator.
- self._op_label = Tkinter.Label(frame, text=' ', width=3,
+ self._op_label = tkinter.Label(frame, text=' ', width=3,
background='#c0c0c0', font=opfont)
self._op_label.pack(side='left', padx=5, pady=5)
# The right matrix.
- cv2_frame = Tkinter.Frame(frame, border=3, relief='groove')
+ cv2_frame = tkinter.Frame(frame, border=3, relief='groove')
cv2_frame.pack(side='left', padx=8, pady=7, expand=1, fill='both')
self._right_selector = MutableOptionMenu(
- cv2_frame, self._charts.keys(), command=self._select_right)
+ cv2_frame, list(self._charts.keys()), command=self._select_right)
self._right_selector.pack(side='top', pady=5, fill='x')
self._right_matrix = ChartMatrixView(cv2_frame, self._emptychart,
toplevel=False,
@@ -619,13 +619,13 @@
self._right_matrix.inactivate()
# The equals sign
- Tkinter.Label(frame, text='=', width=3, background='#c0c0c0',
+ tkinter.Label(frame, text='=', width=3, background='#c0c0c0',
font=eqfont).pack(side='left', padx=5, pady=5)
# The output matrix.
- out_frame = Tkinter.Frame(frame, border=3, relief='groove')
+ out_frame = tkinter.Frame(frame, border=3, relief='groove')
out_frame.pack(side='left', padx=8, pady=7, expand=1, fill='both')
- self._out_label = Tkinter.Label(out_frame, text='Output')
+ self._out_label = tkinter.Label(out_frame, text='Output')
self._out_label.pack(side='top', pady=9)
self._out_matrix = ChartMatrixView(out_frame, self._emptychart,
toplevel=False,
@@ -637,19 +637,19 @@
self._out_matrix.inactivate()
def _init_buttons(self, root):
- buttons = Tkinter.Frame(root)
+ buttons = tkinter.Frame(root)
buttons.pack(side='bottom', pady=5, fill='x', expand=0)
- Tkinter.Button(buttons, text='Intersection',
+ tkinter.Button(buttons, text='Intersection',
command=self._intersection).pack(side='left')
- Tkinter.Button(buttons, text='Union',
+ tkinter.Button(buttons, text='Union',
command=self._union).pack(side='left')
- Tkinter.Button(buttons, text='Difference',
+ tkinter.Button(buttons, text='Difference',
command=self._difference).pack(side='left')
- Tkinter.Frame(buttons, width=20).pack(side='left')
- Tkinter.Button(buttons, text='Swap Charts',
+ tkinter.Frame(buttons, width=20).pack(side='left')
+ tkinter.Button(buttons, text='Swap Charts',
command=self._swapcharts).pack(side='left')
- Tkinter.Button(buttons, text='Detatch Output',
+ tkinter.Button(buttons, text='Detatch Output',
command=self._detatch_out).pack(side='right')
def _init_bindings(self, root):
@@ -692,8 +692,8 @@
defaultextension='.pickle')
if not filename: return
try: pickle.dump((self._out_chart), open(filename, 'w'))
- except Exception, e:
- tkMessageBox.showerror('Error Saving Chart',
+ except Exception as e:
+ tkinter.messagebox.showerror('Error Saving Chart',
'Unable to open file: %r\n%s' %
(filename, e))
@@ -702,8 +702,8 @@
defaultextension='.pickle')
if not filename: return
try: self.load_chart(filename)
- except Exception, e:
- tkMessageBox.showerror('Error Loading Chart',
+ except Exception as e:
+ tkinter.messagebox.showerror('Error Loading Chart',
'Unable to open file: %r\n%s' %
(filename, e))
@@ -925,12 +925,12 @@
# If they didn't provide a main window, then set one up.
if root is None:
- top = Tkinter.Tk()
+ top = tkinter.Tk()
top.title('Chart View')
def destroy1(e, top=top): top.destroy()
def destroy2(top=top): top.destroy()
top.bind('q', destroy1)
- b = Tkinter.Button(top, text='Done', command=destroy2)
+ b = tkinter.Button(top, text='Done', command=destroy2)
b.pack(side='bottom')
self._root = top
else:
@@ -947,9 +947,9 @@
# Create the sentence canvas.
if draw_sentence:
- cframe = Tkinter.Frame(self._root, relief='sunk', border=2)
+ cframe = tkinter.Frame(self._root, relief='sunk', border=2)
cframe.pack(fill='both', side='bottom')
- self._sentence_canvas = Tkinter.Canvas(cframe, height=50)
+ self._sentence_canvas = tkinter.Canvas(cframe, height=50)
self._sentence_canvas['background'] = '#e0e0e0'
self._sentence_canvas.pack(fill='both')
#self._sentence_canvas['height'] = self._sentence_height
@@ -976,12 +976,12 @@
def _init_fonts(self, root):
- self._boldfont = tkFont.Font(family='helvetica', weight='bold',
+ self._boldfont = tkinter.font.Font(family='helvetica', weight='bold',
size=self._fontsize)
- self._font = tkFont.Font(family='helvetica',
+ self._font = tkinter.font.Font(family='helvetica',
size=self._fontsize)
# See:
- self._sysfont = tkFont.Font(font=Tkinter.Button()["font"])
+ self._sysfont = tkinter.font.Font(font=tkinter.Button()["font"])
root.option_add("*Font", self._sysfont)
def _sb_canvas(self, root, expand='y',
@@ -989,12 +989,12 @@
"""
Helper for __init__: construct a canvas with a scrollbar.
"""
- cframe =Tkinter.Frame(root, relief='sunk', border=2)
+ cframe =tkinter.Frame(root, relief='sunk', border=2)
cframe.pack(fill=fill, expand=expand, side=side)
- canvas = Tkinter.Canvas(cframe, background='#e0e0e0')
+ canvas = tkinter.Canvas(cframe, background='#e0e0e0')
# Give the canvas a scrollbar.
- sb = Tkinter.Scrollbar(cframe, orient='vertical')
+ sb = tkinter.Scrollbar(cframe, orient='vertical')
sb.pack(side='right', fill='y')
canvas.pack(side='left', fill=fill, expand='yes')
@@ -1079,7 +1079,7 @@
self._resize()
else:
for edge in self._chart:
- if not self._edgetags.has_key(edge):
+ if edge not in self._edgetags:
self._add_edge(edge)
self._resize()
@@ -1139,7 +1139,7 @@
- Find an available level
- Call _draw_edge
"""
- if self._edgetags.has_key(edge): return
+ if edge in self._edgetags: return
self._analyze_edge(edge)
self._grow()
@@ -1246,11 +1246,11 @@
If no colors are specified, use intelligent defaults
(dependant on selection, etc.)
"""
- if not self._edgetags.has_key(edge): return
+ if edge not in self._edgetags: return
c = self._chart_canvas
if linecolor is not None and textcolor is not None:
- if self._marks.has_key(edge):
+ if edge in self._marks:
linecolor = self._marks[edge]
tags = self._edgetags[edge]
c.itemconfig(tags[0], fill=linecolor)
@@ -1262,7 +1262,7 @@
return
else:
N = self._chart.num_leaves()
- if self._marks.has_key(edge):
+ if edge in self._marks:
self._color_edge(self._marks[edge])
if (edge.is_complete() and edge.span() == (0, N)):
self._color_edge(edge, '#084', '#042')
@@ -1283,7 +1283,7 @@
Unmark an edge (or all edges)
"""
if edge == None:
- old_marked_edges = self._marks.keys()
+ old_marked_edges = list(self._marks.keys())
self._marks = {}
for edge in old_marked_edges:
self._color_edge(edge)
@@ -1379,7 +1379,7 @@
c2.tag_lower(t2)
t3=c3.create_line(x, 0, x, BOTTOM)
c3.tag_lower(t3)
- t4=c3.create_text(x+2, 0, text=`i`, anchor='nw',
+ t4=c3.create_text(x+2, 0, text=repr(i), anchor='nw',
font=self._font)
c3.tag_lower(t4)
#if i % 4 == 0:
@@ -1574,8 +1574,8 @@
except: pass
def _fire_callbacks(self, event, *args):
- if not self._callbacks.has_key(event): return
- for cb_func in self._callbacks[event].keys(): cb_func(*args)
+ if event not in self._callbacks: return
+ for cb_func in list(self._callbacks[event].keys()): cb_func(*args)
#######################################################################
# Pseudo Earley Rule
@@ -1659,14 +1659,14 @@
self._root = None
try:
# Create the root window.
- self._root = Tkinter.Tk()
+ self._root = tkinter.Tk()
self._root.title(title)
self._root.bind('', self.destroy)
# Set up some frames.
- frame3 = Tkinter.Frame(self._root)
- frame2 = Tkinter.Frame(self._root)
- frame1 = Tkinter.Frame(self._root)
+ frame3 = tkinter.Frame(self._root)
+ frame2 = tkinter.Frame(self._root)
+ frame1 = tkinter.Frame(self._root)
frame3.pack(side='bottom', fill='none')
frame2.pack(side='bottom', fill='x')
frame1.pack(side='bottom', fill='both', expand=1)
@@ -1687,7 +1687,7 @@
self.reset()
except:
- print 'Error creating Tree View'
+ print('Error creating Tree View')
self.destroy()
raise
@@ -1725,25 +1725,25 @@
def _init_fonts(self, root):
# See:
- self._sysfont = tkFont.Font(font=Tkinter.Button()["font"])
+ self._sysfont = tkinter.font.Font(font=tkinter.Button()["font"])
root.option_add("*Font", self._sysfont)
# TWhat's our font size (default=same as sysfont)
- self._size = Tkinter.IntVar(root)
+ self._size = tkinter.IntVar(root)
self._size.set(self._sysfont.cget('size'))
- self._boldfont = tkFont.Font(family='helvetica', weight='bold',
+ self._boldfont = tkinter.font.Font(family='helvetica', weight='bold',
size=self._size.get())
- self._font = tkFont.Font(family='helvetica',
+ self._font = tkinter.font.Font(family='helvetica',
size=self._size.get())
def _init_animation(self):
# Are we stepping? (default=yes)
- self._step = Tkinter.IntVar(self._root)
+ self._step = tkinter.IntVar(self._root)
self._step.set(1)
# What's our animation speed (default=fast)
- self._animate = Tkinter.IntVar(self._root)
+ self._animate = tkinter.IntVar(self._root)
self._animate.set(3) # Default speed = fast
# Are we currently animating?
@@ -1757,60 +1757,60 @@
def _init_rulelabel(self, parent):
ruletxt = 'Last edge generated by:'
- self._rulelabel1 = Tkinter.Label(parent,text=ruletxt,
+ self._rulelabel1 = tkinter.Label(parent,text=ruletxt,
font=self._boldfont)
- self._rulelabel2 = Tkinter.Label(parent, width=40,
+ self._rulelabel2 = tkinter.Label(parent, width=40,
relief='groove', anchor='w',
font=self._boldfont)
self._rulelabel1.pack(side='left')
self._rulelabel2.pack(side='left')
- step = Tkinter.Checkbutton(parent, variable=self._step,
+ step = tkinter.Checkbutton(parent, variable=self._step,
text='Step')
step.pack(side='right')
def _init_buttons(self, parent):
- frame1 = Tkinter.Frame(parent)
- frame2 = Tkinter.Frame(parent)
+ frame1 = tkinter.Frame(parent)
+ frame2 = tkinter.Frame(parent)
frame1.pack(side='bottom', fill='x')
frame2.pack(side='top', fill='none')
- Tkinter.Button(frame1, text='Reset\nParser',
+ tkinter.Button(frame1, text='Reset\nParser',
background='#90c0d0', foreground='black',
command=self.reset).pack(side='right')
#Tkinter.Button(frame1, text='Pause',
# background='#90c0d0', foreground='black',
# command=self.pause).pack(side='left')
- Tkinter.Button(frame1, text='Top Down\nStrategy',
+ tkinter.Button(frame1, text='Top Down\nStrategy',
background='#90c0d0', foreground='black',
command=self.top_down_strategy).pack(side='left')
- Tkinter.Button(frame1, text='Bottom Up\nStrategy',
+ tkinter.Button(frame1, text='Bottom Up\nStrategy',
background='#90c0d0', foreground='black',
command=self.bottom_up_strategy).pack(side='left')
- Tkinter.Button(frame1, text='Earley\nAlgorithm',
+ tkinter.Button(frame1, text='Earley\nAlgorithm',
background='#90c0d0', foreground='black',
command=self.earley_algorithm).pack(side='left')
- Tkinter.Button(frame2, text='Top Down Init\nRule',
+ tkinter.Button(frame2, text='Top Down Init\nRule',
background='#90f090', foreground='black',
command=self.top_down_init).pack(side='left')
- Tkinter.Button(frame2, text='Top Down Expand\nRule',
+ tkinter.Button(frame2, text='Top Down Expand\nRule',
background='#90f090', foreground='black',
command=self.top_down_expand).pack(side='left')
- Tkinter.Button(frame2, text='Top Down Match\nRule',
+ tkinter.Button(frame2, text='Top Down Match\nRule',
background='#90f090', foreground='black',
command=self.top_down_match).pack(side='left')
- Tkinter.Frame(frame2, width=20).pack(side='left')
-
- Tkinter.Button(frame2, text='Bottom Up Init\nRule',
+ tkinter.Frame(frame2, width=20).pack(side='left')
+
+ tkinter.Button(frame2, text='Bottom Up Init\nRule',
background='#90f090', foreground='black',
command=self.bottom_up_init).pack(side='left')
- Tkinter.Button(frame2, text='Bottom Up Predict\nRule',
+ tkinter.Button(frame2, text='Bottom Up Predict\nRule',
background='#90f090', foreground='black',
command=self.bottom_up).pack(side='left')
- Tkinter.Frame(frame2, width=20).pack(side='left')
-
- Tkinter.Button(frame2, text='Fundamental\nRule',
+ tkinter.Frame(frame2, width=20).pack(side='left')
+
+ tkinter.Button(frame2, text='Fundamental\nRule',
background='#90f090', foreground='black',
command=self.fundamental).pack(side='left')
@@ -1844,9 +1844,9 @@
self._root.bind('s', lambda e,s=self._step:s.set(not s.get()))
def _init_menubar(self):
- menubar = Tkinter.Menu(self._root)
-
- filemenu = Tkinter.Menu(menubar, tearoff=0)
+ menubar = tkinter.Menu(self._root)
+
+ filemenu = tkinter.Menu(menubar, tearoff=0)
filemenu.add_command(label='Save Chart', underline=0,
command=self.save_chart, accelerator='Ctrl-s')
filemenu.add_command(label='Load Chart', underline=0,
@@ -1863,7 +1863,7 @@
command=self.destroy, accelerator='Ctrl-x')
menubar.add_cascade(label='File', underline=0, menu=filemenu)
- editmenu = Tkinter.Menu(menubar, tearoff=0)
+ editmenu = tkinter.Menu(menubar, tearoff=0)
editmenu.add_command(label='Edit Grammar', underline=5,
command=self.edit_grammar,
accelerator='Ctrl-g')
@@ -1872,14 +1872,14 @@
accelerator='Ctrl-t')
menubar.add_cascade(label='Edit', underline=0, menu=editmenu)
- viewmenu = Tkinter.Menu(menubar, tearoff=0)
+ viewmenu = tkinter.Menu(menubar, tearoff=0)
viewmenu.add_command(label='Chart Matrix', underline=6,
command=self.view_matrix)
viewmenu.add_command(label='Results', underline=0,
command=self.view_results)
menubar.add_cascade(label='View', underline=0, menu=viewmenu)
- rulemenu = Tkinter.Menu(menubar, tearoff=0)
+ rulemenu = tkinter.Menu(menubar, tearoff=0)
rulemenu.add_command(label='Top Down Strategy', underline=0,
command=self.top_down_strategy,
accelerator='t')
@@ -1904,7 +1904,7 @@
command=self.fundamental)
menubar.add_cascade(label='Apply', underline=0, menu=rulemenu)
- animatemenu = Tkinter.Menu(menubar, tearoff=0)
+ animatemenu = tkinter.Menu(menubar, tearoff=0)
animatemenu.add_checkbutton(label="Step", underline=0,
variable=self._step,
accelerator='s')
@@ -1922,7 +1922,7 @@
accelerator='+')
menubar.add_cascade(label="Animate", underline=1, menu=animatemenu)
- zoommenu = Tkinter.Menu(menubar, tearoff=0)
+ zoommenu = tkinter.Menu(menubar, tearoff=0)
zoommenu.add_radiobutton(label='Tiny', variable=self._size,
underline=0, value=10, command=self.resize)
zoommenu.add_radiobutton(label='Small', variable=self._size,
@@ -1935,7 +1935,7 @@
underline=0, value=24, command=self.resize)
menubar.add_cascade(label='Zoom', underline=0, menu=zoommenu)
- helpmenu = Tkinter.Menu(menubar, tearoff=0)
+ helpmenu = tkinter.Menu(menubar, tearoff=0)
helpmenu.add_command(label='About', underline=0,
command=self.about)
helpmenu.add_command(label='Instructions', underline=0,
@@ -2010,7 +2010,7 @@
def about(self, *e):
ABOUT = ("NLTK Chart Parser Demo\n"+
"Written by Edward Loper")
- tkMessageBox.showinfo('About: Chart Parser Demo', ABOUT)
+ tkinter.messagebox.showinfo('About: Chart Parser Demo', ABOUT)
#////////////////////////////////////////////////////////////
# File Menu
@@ -2035,9 +2035,9 @@
if self._matrix: self._matrix.deselect_cell()
if self._results: self._results.set_chart(chart)
self._cp.set_chart(chart)
- except Exception, e:
+ except Exception as e:
raise
- tkMessageBox.showerror('Error Loading Chart',
+ tkinter.messagebox.showerror('Error Loading Chart',
'Unable to open file: %r' % filename)
def save_chart(self, *args):
@@ -2047,9 +2047,9 @@
if not filename: return
try:
pickle.dump(self._chart, open(filename, 'w'))
- except Exception, e:
+ except Exception as e:
raise
- tkMessageBox.showerror('Error Saving Chart',
+ tkinter.messagebox.showerror('Error Saving Chart',
'Unable to open file: %r' % filename)
def load_grammar(self, *args):
@@ -2063,8 +2063,8 @@
else:
grammar = cfg.parse_grammar(open(filename, 'r').read())
self.set_grammar(grammar)
- except Exception, e:
- tkMessageBox.showerror('Error Loading Grammar',
+ except Exception as e:
+ tkinter.messagebox.showerror('Error Loading Grammar',
'Unable to open file: %r' % filename)
def save_grammar(self, *args):
@@ -2082,8 +2082,8 @@
for prod in start: file.write('%s\n' % prod)
for prod in rest: file.write('%s\n' % prod)
file.close()
- except Exception, e:
- tkMessageBox.showerror('Error Saving Grammar',
+ except Exception as e:
+ tkinter.messagebox.showerror('Error Saving Grammar',
'Unable to open file: %r' % filename)
def reset(self, *args):
@@ -2209,7 +2209,7 @@
self._root.after(20, self._animate_strategy)
def _apply_strategy(self):
- new_edge = self._cpstep.next()
+ new_edge = next(self._cpstep)
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/tagging/drawchart.py
if new_edge is not None:
self._show_new_edge(new_edge)
@@ -2281,12 +2281,12 @@
sent = 'John ate the cake on the table'
tokens = list(tokenize.whitespace(sent))
- print 'grammar= ('
+ print('grammar= (')
for rule in grammar.productions():
- print ' ', repr(rule)+','
- print ')'
- print 'tokens = %r' % tokens
- print 'Calling "ChartDemo(grammar, tokens)"...'
+ print(' ', repr(rule)+',')
+ print(')')
+ print('tokens = %r' % tokens)
+ print('Calling "ChartDemo(grammar, tokens)"...')
ChartDemo(grammar, tokens).mainloop()
if __name__ == '__main__':
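Note: three details in the drawchart.py diff above are worth flagging. The backquote syntax removed around the text= arguments (text=`i`) was Python 2 shorthand for repr() and no longer parses, hence repr(i); 'except Exception, e' likewise became 'except Exception as e' because the comma form is a syntax error in Python 3. What 2to3 does not catch: pickle.dump(self._chart, open(filename, 'w')) still opens the file in text mode, and Python 3's pickle writes bytes, so the dump raises TypeError until the mode becomes 'wb'. A sketch of the corrected save:

    import pickle

    def save_chart(chart, filename):
        # pickle emits bytes on Python 3: the file must be opened 'wb'
        with open(filename, 'wb') as f:
            pickle.dump(chart, f)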
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/tagging/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No files need to be modified.
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/treeview.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/treeview.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/treeview.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/treeview.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/treeview.py (refactored)
@@ -1,4 +1,4 @@
-import Tkinter
+import tkinter
from nltk.draw import TreeWidget
from nltk.draw import CanvasFrame
@@ -7,32 +7,32 @@
class TreeView:
def __init__(self, trees, root=None):
if len(trees) == 0:
- print "No trees to display."
+ print("No trees to display.")
return
newroot = False
if root is None:
- root = Tkinter.Tk()
+ root = tkinter.Tk()
window = root
newroot = True
else:
- window = Tkinter.Toplevel(root)
+ window = tkinter.Toplevel(root)
window.title("Parse Tree")
window.geometry("600x400")
self.cf = CanvasFrame(window)
self.cf.pack(side='top', expand=1, fill='both')
- buttons = Tkinter.Frame(window)
+ buttons = tkinter.Frame(window)
buttons.pack(side='bottom', fill='x')
- self.spin = Tkinter.Spinbox(buttons, from_=1, to=len(trees),
+ self.spin = tkinter.Spinbox(buttons, from_=1, to=len(trees),
command=self.showtree, width=3)
if len(trees) > 1: self.spin.pack(side='left')
- self.label = Tkinter.Label(buttons, text="of %d" % len(trees))
+ self.label = tkinter.Label(buttons, text="of %d" % len(trees))
if len(trees) > 1: self.label.pack(side='left')
- self.done = Tkinter.Button(buttons, text="Done", command=window.destroy)
+ self.done = tkinter.Button(buttons, text="Done", command=window.destroy)
self.done.pack(side='right')
- self.printps = Tkinter.Button(buttons, text="Print to Postscript", command=self.cf.print_to_file)
+ self.printps = tkinter.Button(buttons, text="Print to Postscript", command=self.cf.print_to_file)
self.printps.pack(side='right')
self.trees = trees
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/tree.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No files need to be modified.
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/testw.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/testw.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/testw.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/testw.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/testw.py (refactored)
@@ -1,14 +1,14 @@
-from featurechart import *
-from treeview import *
+from .featurechart import *
+from .treeview import *
def demo():
cp = load_earley('lab3-slash.cfg', trace=1)
trees = cp.parse('Mary walks')
for tree in trees:
- print tree
+ print(tree)
sem = tree[0].node['sem']
- print sem
- print sem.skolemise().clauses()
+ print(sem)
+ print(sem.skolemise().clauses())
return sem.skolemise().clauses()
#run_profile()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/test.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/test.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/test.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/test.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/test.py (refactored)
@@ -1,14 +1,14 @@
-from featurechart import *
-from treeview import *
+from .featurechart import *
+from .treeview import *
def demo():
cp = load_earley('lab3-slash.cfg', trace=0)
trees = cp.parse('Mary sees a dog in Noosa')
for tree in trees:
- print tree
+ print(tree)
sem = tree[0].node['sem']
- print sem
- print sem.skolemise().clauses()
+ print(sem)
+ print(sem.skolemise().clauses())
return sem.skolemise().clauses()
#run_profile()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/logic.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/logic.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/logic.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/logic.py (refactored)
@@ -1,7 +1,7 @@
# Natural Language Toolkit: Logic
from nltk.utilities import Counter
-from featurelite import SubstituteBindingsMixin, FeatureI
-from featurelite import Variable as FeatureVariable
+from .featurelite import SubstituteBindingsMixin, FeatureI
+from .featurelite import Variable as FeatureVariable
_counter = Counter()
def unique_variable(counter=None):
@@ -137,7 +137,7 @@
raise NotImplementedError
def __hash__(self):
- raise NotImplementedError, self.__class__
+ raise NotImplementedError(self.__class__)
def normalize(self):
if hasattr(self, '_normalized'): return self._normalized
@@ -612,7 +612,7 @@
@returns: a parsed Expression
"""
self.feed(data)
- result = self.next()
+ result = next(self)
return result
def process(self):
@@ -629,7 +629,7 @@
whether the token will be removed from the buffer; setting it to
0 gives lookahead capability."""
if self.buffer == '':
- raise Error, "end of stream"
+ raise Error("end of stream")
tok = None
buffer = self.buffer
while not tok:
@@ -654,7 +654,7 @@
TOKENS.extend(Parser.BOOL)
return token not in TOKENS
- def next(self):
+ def __next__(self):
"""Parse the next complete expression from the stream and return it."""
tok = self.token()
@@ -678,8 +678,8 @@
tok = self.token()
if tok != Parser.DOT:
- raise Error, "parse error, unexpected token: %s" % tok
- term = self.next()
+ raise Error("parse error, unexpected token: %s" % tok)
+ term = next(self)
accum = factory(Variable(vars.pop()), term)
while vars:
accum = factory(Variable(vars.pop()), accum)
@@ -687,12 +687,12 @@
elif tok == Parser.OPEN:
# Expression is an application expression: (M N)
- first = self.next()
- second = self.next()
+ first = next(self)
+ second = next(self)
exps = []
while self.token(0) != Parser.CLOSE:
# Support expressions like: (M N P) == ((M N) P)
- exps.append(self.next())
+ exps.append(next(self))
tok = self.token() # swallow the close token
assert tok == Parser.CLOSE
if isinstance(second, Operator):
@@ -721,7 +721,7 @@
# Expression is a simple variable expression: x
return VariableExpression(Variable(tok))
else:
- raise Error, "parse error, unexpected token: %s" % tok
+ raise Error("parse error, unexpected token: %s" % tok)
# This is intended to be overridden, so that you can derive a Parser class
# that constructs expressions using your subclasses. So far we only need
@@ -762,7 +762,7 @@
ApplicationExpression(XZ, Y))))
O = LambdaExpression(x, LambdaExpression(y, XY))
N = ApplicationExpression(LambdaExpression(x, XA), I)
- T = Parser('\\x y.(x y z)').next()
+ T = next(Parser('\\x y.(x y z)'))
return [X, XZ, XYZ, I, K, L, S, B, C, O, N, T]
def demo():
@@ -771,21 +771,21 @@
P = VariableExpression(p)
Q = VariableExpression(q)
for l in expressions():
- print "Expression:", l
- print "Variables:", l.variables()
- print "Free:", l.free()
- print "Subterms:", l.subterms()
- print "Simplify:",l.simplify()
+ print("Expression:", l)
+ print("Variables:", l.variables())
+ print("Free:", l.free())
+ print("Subterms:", l.subterms())
+ print("Simplify:",l.simplify())
la = ApplicationExpression(ApplicationExpression(l, P), Q)
las = la.simplify()
- print "Apply and sRefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/logic.py
implify: %s -> %s" % (la, las)
- ll = Parser(str(l)).next()
- print 'l is:', l
- print 'll is:', ll
+ print("Apply and simplify: %s -> %s" % (la, las))
+ ll = next(Parser(str(l)))
+ print('l is:', l)
+ print('ll is:', ll)
assert l.equals(ll)
- print "Serialize and reparse: %s -> %s" % (l, ll)
- print "Variables:", ll.variables()
- print "Normalize: %s" % ll.normalize()
+ print("Serialize and reparse: %s -> %s" % (l, ll))
+ print("Variables:", ll.variables())
+ print("Normalize: %s" % ll.normalize())
if __name__ == '__main__':
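The logic.py hunks bundle three rewrites that only work together: "raise Error, msg" becomes "raise Error(msg)", the parser's next() method is renamed __next__(), and every call site switches to the next() builtin. A toy stream showing the combination (a stand-in class, not the NLTK Parser):

    class TokenStream:
        """Minimal stand-in for the parser's token buffer."""
        def __init__(self, tokens):
            self.tokens = list(tokens)

        def __iter__(self):
            return self

        def __next__(self):                      # was: def next(self)
            if not self.tokens:
                raise StopIteration("end of stream")   # was: raise Error, "..."
            return self.tokens.pop(0)

    stream = TokenStream(["\\", "x", ".", "x"])
    print(next(stream))                          # the builtin dispatches to __next__
    print(list(stream))                          # ['x', '.', 'x']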
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/interact.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/interact.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/interact.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/interact.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/interact.py (refactored)
@@ -1,5 +1,5 @@
-from featurechart import *
-from logic import Counter
+from .featurechart import *
+from .logic import Counter
import sys
def interact(grammar_filename, trace=2):
@@ -14,10 +14,10 @@
# Read a line and parse it.
trees = cp.parse(line)
if len(trees) == 0:
- print "I don't understand."
+ print("I don't understand.")
continue
elif len(trees) > 1:
- print "That was ambiguous, but I'll guess at what you meant."
+ print("That was ambiguous, but I'll guess at what you meant.")
# Extract semantic information from the parse tree.
tree = trees[0]
@@ -36,13 +36,13 @@
skolem = skolem.replace_unique(var, counter)
if trace > 0:
- print tree
- print 'Semantic value:', skolem
+ print(tree)
+ print('Semantic value:', skolem)
clauses = skolem.clauses()
if trace > 1:
- print "Got these clauses:"
+ print("Got these clauses:")
for clause in clauses:
- print '\t', clause
+ print('\t', clause)
if pos == 'S':
# Handle statements
@@ -68,11 +68,11 @@
if success:
# answer
answer = bindings.get('wh', 'Yes.')
- print answer['variable']['name']
+ print(answer['variable']['name'])
else:
# This is an open world without negation, so negative answers
# aren't possible.
- print "I don't know."
+ print("I don't know.")
def demo():
interact('lab3-slash.cfg', trace=2)
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/featurelite.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/featurelite.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/featurelite.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/featurelite.py (refactored)
@@ -91,7 +91,7 @@
class FeatureI(object):
def __init__(self):
- raise TypeError, "FeatureI is an abstract interface"
+ raise TypeError("FeatureI is an abstract interface")
class _FORWARD(object):
"""
@@ -102,7 +102,7 @@
instantiated.
"""
def __init__(self):
- raise TypeError, "The _FORWARD class is not meant to be instantiated"
+ raise TypeError("The _FORWARD class is not meant to be instantiated")
class Variable(object):
"""
@@ -260,7 +260,7 @@
# discard Variables which don't look like FeatureVariables
if varstr.startswith('?'):
var = makevar(varstr)
- if bindings.has_key(var.name()):
+ if var.name() in bindings:
newval = newval.replace(semvar, bindings[var.name()])
return newval
@@ -278,13 +278,13 @@
if isMapping(obj): return obj
dict = {}
dict['__class__'] = obj.__class__.__name__
- for (key, value) in obj.__dict__.items():
+ for (key, value) in list(obj.__dict__.items()):
dict[key] = object_to_features(value)
return dict
def variable_representer(dumper, var):
"Output variables in YAML as ?name."
- return dumper.represent_scalar(u'!var', u'?%s' % var.name())
+ return dumper.represent_scalar('!var', '?%s' % var.name())
yaml.add_representer(Variable, variable_representer)
def variable_constructor(loader, node):
@@ -292,8 +292,8 @@
value = loader.construct_scalar(node)
name = value[1:]
return Variable(name)
-yaml.add_constructor(u'!var', variable_constructor)
-yaml.add_implicit_resolver(u'!var', re.compile(r'^\?\w+$'))
+yaml.add_constructor('!var', variable_constructor)
+yaml.add_implicit_resolver('!var', re.compile(r'^\?\w+$'))
def _copy_and_bind(feature, bindings, memo=None):
"""
@@ -305,14 +305,14 @@
if memo is None: memo = {}
if id(feature) in memo: return memo[id(feature)]
if isinstance(feature, Variable) and bindings is not None:
- if not bindings.has_key(feature.name()):
+ if feature.name() not in bindings:
bindings[feature.name()] = feature.copy()
result = _copy_and_bind(bindings[feature.name()], None, memo)
else:
if isMapping(feature):
# Construct a new object of the same class
result = feature.__class__()
- for (key, value) in feature.items():
+ for (key, value) in list(feature.items()):
result[key] = _copy_and_bind(value, bindings, memo)
elif isinstance(feature, SubstituteBindingsI):
if bindings is not None:
@@ -629,19 +629,19 @@
if memo is None: memo = {}
copymemo = {}
- if memo.has_key((id(feature1), id(feature2))):
+ if (id(feature1), id(feature2)) in memo:
result = memo[id(feature1), id(feature2)]
if result is UnificationFailure:
if trace > 2:
- print '(cached) Unifying: %r + %r --> [fail]' % (feature1, feature2)
+ print('(cached) Unifying: %r + %r --> [fail]' % (feature1, feature2))
raise result()
if trace > 2:
- print '(cached) Unifying: %r + %r --> ' % (feature1, feature2),
- print repr(result)
+ print('(cached) Unifying: %r + %r --> ' % (feature1, feature2), end=' ')
+ print(repr(result))
return result
if trace > 1:
- print 'Unifying: %r + %r --> ' % (feature1, feature2),
+ print('Unifying: %r + %r --> ' % (feature1, feature2), end=' ')
# Make copies of the two structures (since the unification algorithm is
# destructive). Use the same memo, to preserve reentrance links between
@@ -650,7 +650,7 @@
copy2 = _copy_and_bind(feature2, bindings2, copymemo)
# Preserve links between bound variables and the two feature structures.
for b in (bindings1, bindings2):
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/featurelite.py
- for (vname, value) in b.items():
+ for (vname, value) in list(b.items()):
value_id = id(value)
if value_id in copymemo:
b[vname] = copymemo[value_id]
@@ -660,7 +660,7 @@
unified = _destructively_unify(copy1, copy2, bindings1, bindings2, memo,
fail)
except UnificationFailure:
- if trace > 1: print '[fail]'
+ if trace > 1: print('[fail]')
memo[id(feature1), id(feature2)] = UnificationFailure
raise
@@ -672,9 +672,9 @@
_lookup_values(bindings2, {}, remove=True)
if trace > 1:
- print repr(unified)
+ print(repr(unified))
elif trace > 0:
- print 'Unifying: %r + %r --> %r' % (feature1, feature2, repr(unified))
+ print('Unifying: %r + %r --> %r' % (feature1, feature2, repr(unified)))
memo[id(feature1), id(feature2)] = unified
return unified
@@ -690,11 +690,11 @@
and C{other} are undefined.
"""
if depth > 50:
- print "Infinite recursion in this unification:"
- print show(dict(feature1=feature1, feature2=feature2,
- bindings1=bindings1, bindings2=bindings2, memo=memo))
- raise ValueError, "Infinite recursion in unification"
- if memo.has_key((id(feature1), id(feature2))):
+ print("Infinite recursion in this unification:")
+ print(show(dict(feature1=feature1, feature2=feature2,
+ bindings1=bindings1, bindings2=bindings2, memo=memo)))
+ raise ValueError("Infinite recursion in unification")
+ if (id(feature1), id(feature2)) in memo:
result = memo[id(feature1), id(feature2)]
if result is UnificationFailure: raise result()
unified = _do_unify(feature1, feature2, bindings1, bindings2, memo, fail,
@@ -737,9 +737,9 @@
# At this point, we know they're both mappings.
# Do the destructive part of unification.
- while feature2.has_key(_FORWARD): feature2 = feature2[_FORWARD]
+ while _FORWARD in feature2: feature2 = feature2[_FORWARD]
if feature1 is not feature2: feature2[_FORWARD] = feature1
- for (fname, val2) in feature2.items():
+ for (fname, val2) in list(feature2.items()):
if fname == _FORWARD: continue
val1 = feature1.get(fname)
feature1[fname] = _destructively_unify(val1, val2, bindings1,
@@ -752,12 +752,12 @@
the target of its forward pointer (to preserve reentrance).
"""
if not isMapping(feature): return
- if visited.has_key(id(feature)): return
+ if id(feature) in visited: return
visited[id(feature)] = True
- for fname, fval in feature.items():
+ for fname, fval in list(feature.items()):
if isMapping(fval):
- while fval.has_key(_FORWARD):
+ while _FORWARD in fval:
fval = fval[_FORWARD]
feature[fname] = fval
_apply_forwards(fval, visited)
@@ -789,10 +789,10 @@
else:
return var.forwarded_self()
if not isMapping(mapping): return mapping
- if visited.has_key(id(mapping)): return mapping
+ if id(mapping) in visited: return mapping
visited[id(mapping)] = True
- for fname, fval in mapping.items():
+ for fname, fval in list(mapping.items()):
if isMapping(fval):
_lookup_values(fval, visited)
elif isinstance(fval, Variable):
@@ -813,9 +813,9 @@
Replace any feature structures that have been forwarded by their new
identities.
"""
- for (key, value) in bindings.items():
- if isMapping(value) and value.has_key(_FORWARD):
- while value.has_key(_FORWARD):
+ for (key, value) in list(bindings.items()):
+ if isMapping(value) and _FORWARD in value:
+ while _FORWARD in value:
value = value[_FORWARD]
bindings[key] = value
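featurelite.py concentrates the dictionary-protocol changes: d.has_key(k) becomes "k in d", and keys()/items() now return live views, so 2to3 wraps them in list() wherever the loop body might mutate the dict, as the forwarding code above does. Both rewrites in miniature, on a toy bindings dict:

    bindings = {"x": 1, "fwd": 2}

    if "x" in bindings:                     # was: bindings.has_key("x")
        pass

    # items() is a live view in Python 3; copying it with list() keeps
    # deletion inside the loop safe, matching the rewritten code above.
    for key, value in list(bindings.items()):
        if key == "fwd":
            del bindings[key]

    print(bindings)                         # {'x': 1}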
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/featurechart.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/featurechart.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/featurechart.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/featurechart.py (refactored)
@@ -13,11 +13,11 @@
"""
import yaml
-from chart import *
-from category import *
-import cfg
-
-from featurelite import *
+from .chart import *
+from .category import *
+from . import cfg
+
+from .featurelite import *
def load_earley(filename, trace=1):
"""
@@ -125,7 +125,7 @@
@return: the value of the right-hand side with variables set.
@rtype: C{Category}
"""
- return tuple(apply(x, self._vars) for x in TreeEdge.rhs(self))
+ return tuple(x(*self._vars) for x in TreeEdge.rhs(self))
def orig_rhs(self):
"""
@@ -160,7 +160,7 @@
left_bindings = left_edge.vars().copy()
right_bindings = right_edge.vars().copy()
try:
- unified = unify(left_edge.next(), right_edge.lhs(), left_bindings,
+ unified = unify(next(left_edge), right_edge.lhs(), left_bindings,
right_bindings, memo=self.unify_memo, trace=self.trace-2)
if isinstance(unified, Category): unified.freeze()
except UnificationFailure: return
@@ -213,7 +213,7 @@
for prod in grammar.productions():
bindings = edge.vars().copy()
try:
- unified = unify(edge.next(), prod.lhs(), bindings, {},
+ unified = unify(next(edge), prod.lhs(), bindings, {},
memo=self.unify_memo, trace=self.trace-2)
if isinstance(unified, Category): unified.freeze()
except UnificationFailure:
@@ -258,7 +258,7 @@
# Width, for printing trace edges.
#w = 40/(chart.num_leaves()+1)
w = 2
- if self._trace > 0: print ' '*9, chart.pp_leaves(w)
+ if self._trace > 0: print(' '*9, chart.pp_leaves(w))
# Initialize the chart with a special "starter" edge.
root = GrammarCategory(pos='[INIT]')
@@ -272,7 +272,7 @@
#scanner = FeatureScannerRule(self._lexicon)
for end in range(chart.num_leaves()+1):
- if self._trace > 1: print 'Processing queue %d' % end
+ if self._trace > 1: print('Processing queue %d' % end)
# Scanner rule substitute, i.e. this is being used in place
# of a proper FeatureScannerRule at the moment.
@@ -285,14 +285,14 @@
{})
chart.insert(new_pos_edge, (new_leaf_edge,))
if self._trace > 0:
- print 'Scanner ', chart.pp_edge(new_pos_edge,w)
+ print('Scanner ', chart.pp_edge(new_pos_edge,w))
for edge in chart.select(end=end):
if edge.is_incomplete():
for e in predictor.apply(chart, grammar, edge):
if self._trace > 1:
- print 'Predictor', chart.pp_edge(e,w)
+ print('Predictor', chart.pp_edge(e,w))
#if edge.is_incomplete():
# for e in scanner.apply(chart, grammar, edge):
# if self._trace > 0:
@@ -300,7 +300,7 @@
if edge.is_complete():
for e in completer.apply(chart, grammar, edge):
if self._trace > 0:
- print 'Completer', chart.pp_edge(e,w)
+ print('Completer', chart.pp_edge(e,w))
# Output a list of complete parses.
return chart.parses(root)
@@ -348,14 +348,14 @@
return earley_lexicon.get(word.upper(), [])
sent = 'I saw John with a dog with my cookie'
- print "Sentence:\n", sent
+ print("Sentence:\n", sent)
from nltk import tokenize
tokens = list(tokenize.whitespace(sent))
t = time.time()
cp = FeatureEarleyChartParse(earley_grammar, lexicon, trace=1)
trees = cp.get_parse_list(tokens)
- print "Time: %s" % (time.time() - t)
- for tree in trees: print tree
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/featurechart.py
+ print("Time: %s" % (time.time() - t))
+ for tree in trees: print(tree)
def run_profile():
import profile
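featurechart.py also loses the apply() builtin, removed in Python 3: apply(f, args) becomes f(*args). In miniature, with an illustrative function rather than the real TreeEdge API:

    def instantiate(lhs, *vars):
        return (lhs,) + vars

    args = ("S", "?x", "?y")
    print(instantiate(*args))               # was: apply(instantiate, args)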
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/chart.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/chart.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/chart.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/chart.py (refactored)
@@ -9,7 +9,7 @@
#
# $Id: chart.py 4157 2007-02-28 09:56:25Z stevenbird $
-from __init__ import *
+from .__init__ import *
from nltk import cfg, Tree
"""
@@ -162,7 +162,7 @@
"""
raise AssertionError('EdgeI is an abstract interface')
- def next(self):
+ def __next__(self):
"""
@return: The element of this edge's right-hand side that
immediately follows its dot.
@@ -271,7 +271,7 @@
def dot(self): return self._dot
def is_complete(self): return self._dot == len(self._rhs)
def is_incomplete(self): return self._dot != len(self._rhs)
- def next(self):
+ def __next__(self):
if self._dot >= len(self._rhs): return None
else: return self._rhs[self._dot]
@@ -334,7 +334,7 @@
def dot(self): return 0
def is_complete(self): return True
def is_incomplete(self): return False
- def next(self): return None
+ def __next__(self): return None
# Comparisons & hashing
def __cmp__(self, other):
@@ -487,12 +487,12 @@
if restrictions=={}: return iter(self._edges)
# Find the index corresponding to the given restrictions.
- restr_keys = restrictions.keys()
+ restr_keys = list(restrictions.keys())
restr_keys.sort()
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
- if not self._indexes.has_key(restr_keys):
+ if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = [restrictions[k] for k in restr_keys]
return iter(self._indexes[restr_keys].get(tuple(vals), []))
@@ -505,7 +505,7 @@
# Make sure it's a valid index.
for k in restr_keys:
if not hasattr(EdgeI, k):
- raise ValueError, 'Bad restriction: %s' % k
+ raise ValueError('Bad restriction: %s' % k)
# Create the index.
self._indexes[restr_keys] = {}
@@ -537,12 +537,12 @@
C{child_pointer_list} with C{edge}.
"""
# Is it a new edge?
- if not self._edge_to_cpls.has_key(edge):
+ if edge not in self._edge_to_cpls:
# Add it to the list of edges.
self._edges.append(edge)
# Register with indexes
- for (restr_keys, index) in self._indexes.items():
+ for (restr_keys, index) in list(self._indexes.items()):
vals = [getattr(edge, k)() for k in restr_keys]
index = self._indexes[restr_keys]
index.setdefault(tuple(vals),[]).append(edge)
@@ -551,7 +551,7 @@
cpls = self._edge_to_cpls.setdefault(edge,{})
child_pointer_list = tuple(child_pointer_list)
- if cpls.has_key(child_pointer_list):
+ if child_pointer_list in cpls:
# We've already got this CPL; return false.
return False
else:
@@ -601,7 +601,7 @@
than once, we can reuse the same trees.
"""
# If we've seen this edge before, then reuse our old answer.
- if memo.has_key(edge): return memo[edge]
+ if edge in memo: return memo[edge]
trees = []
@@ -677,7 +677,7 @@
been used to form this edge.
"""
# Make a copy, in case they modify it.
- return self._edge_to_cpls.get(edge, {}).keys()
+ return list(self._edge_to_cpls.get(edge, {}).keys())
#////////////////////////////////////////////////////////////
# Display
@@ -839,7 +839,7 @@
@rtype: C{list} of L{EdgeI}
@return: A list of the edges that were added.
"""
- raise AssertionError, 'ChartRuleI is an abstract interface'
+ raise AssertionError('ChartRuleI is an abstract interface')
def apply_iter(self, chart, grammar, *edges):
"""
@@ -854,7 +854,7 @@
that should be passed to C{apply} is specified by the
L{NUM_EDGES} class variable.
"""
- raise AssertionError, 'ChartRuleI is an abstract interface'
+ raise AssertionError('ChartRuleI is an abstract interface')
def apply_everywhere(self, chart, grammar):
"""
@@ -864,7 +864,7 @@
@rtype: C{list} of L{EdgeI}
@return: A list of the edges that were added.
"""
- raise AssertionError, 'ChartRuleI is an abstract interface'
+ raise AssertionError('ChartRuleI is an abstract interface')
def apply_everywhere_iter(self, chart, grammar):
"""
@@ -875,7 +875,7 @@
return.
@rtype: C{iter} of L{EdgeI}
"""
- raise AssertionError, 'ChartRuleI is an abstract interface'
+ raise AssertionError('ChartRuleI is an abstract interface')
class AbstractChartRule(object):
"""
@@ -893,7 +893,7 @@
# Subclasses must define apply_iter.
def apply_iter(self, chart, grammar, *edges):
- raise AssertionError, 'AbstractChartRule is an abstract class'
+ raise AssertionError('AbstractChartRule is an abstract class')
# Default: loop through the given number of edges, and call
# self.apply() for each set of edges.
@@ -921,7 +921,7 @@
yield new_edge
else:
- raise AssertionError, 'NUM_EDGES>3 is not currently supported'
+ raise AssertionError('NUM_EDGES>3 is not currently supported')
# Default: delegate to apply_iter.
def apply(self, chart, grammar, *edges):
@@ -953,7 +953,7 @@
def apply_iter(self, chart, grammar, left_edge, right_edge):
# Make sure the rule is applicable.
if not (left_edge.end() == right_edge.start() and
- left_edge.next() == right_edge.lhs() and
+ next(left_edge) == right_edge.lhs() and
left_edge.is_incomplete() and right_edge.is_complete()):
return
@@ -993,7 +993,7 @@
if edge1.is_incomplete():
# edge1 = left_edge; edge2 = right_edge
for edge2 in chart.select(start=edge1.end(), is_complete=True,
- lhs=edge1.next()):
+ lhs=next(edge1)):
for new_edge in fr.apply_iter(chart, grammar, edge1, edge2):
yield new_edge
else:
@@ -1052,7 +1052,7 @@
NUM_EDGES = 1
def apply_iter(self, chart, grammar, edge):
if edge.is_complete(): return
- for prod in grammar.productions(lhs=edge.next()):
+ for prod in grammar.productions(lhs=next(edge)):
new_edge = TreeEdge.from_production(prod, edge.end())
if chart.insert(new_edge, ()):
yield new_edge
@@ -1071,7 +1071,7 @@
if edge.is_complete() or edge.end() >= chart.num_leaves(): return
index = edge.end()
leaf = chart.leaf(index)
- if edge.next() == leaf:
+ if next(edge) == leaf:
new_edge = LeafEdge(leaf, index)
if chart.insert(new_edge, ()):
yield new_edge
@@ -1119,7 +1119,7 @@
# If we've already applied this rule to an edge with the same
# next & end, and the chart & grammar have not changed, then
# just return (no new edges to add).
- done = self._done.get((edge.next(), edge.end()), (None,None))
+ done = self._done.get((next(edge), edge.end()), (None,None))
if done[0] is chart and done[1] is grammar: return
# Add all the edges indicated by the top down expand rule.
@@ -1127,7 +1127,7 @@
yield e
# Record the fact that we've applied this rule.
- self._done[edge.next(), edge.end()] = (chart, grammar)
+ self._done[next(edge), edge.end()] = (chart, grammar)
def __str__(self): return 'Top Down Expand Rule'
@@ -1219,11 +1219,11 @@
if edge.is_complete() or edge.end()>=chart.num_leaves(): return
index = edge.end()
leaf = chart.leaf(index)
- if edge.next() in self._word_to_pos.get(leaf, []):
+ if next(edge) in self._word_to_pos.get(leaf, []):
new_leaf_edge = LeafEdge(leaf, index)
if chart.insert(new_leaf_edge, ()):
yield new_leaf_edge
- new_pos_edge = TreeEdge((index,index+1), edge.next(),
+ new_pos_edge = TreeEdge((index,index+1), next(edge),
[leaf], 1)
if chart.insert(new_pos_edge, (new_leaf_edge,)):
yield new_pos_edge
@@ -1284,7 +1284,7 @@
# Width, for printing trace edges.
w = 50/(chart.num_leaves()+1)
- if self._trace > 0: print ' ', chart.pp_leaves(w)
+ if self._trace > 0: print(' ', chart.pp_leaves(w))
# Initialize the chart with a special "starter" edge.
root = cfg.Nonterminal('[INIT]')
@@ -1297,20 +1297,20 @@
scanner = ScannerRule(self._lexicon)
for end in range(chart.num_leaves()+1):
- if self._trace > 1: print 'Processing queue %d' % end
+ if self._trace > 1: print('Processing queue %d' % end)
for edge in chart.select(end=end):
if edge.is_incomplete():
for e in predictor.apply(chart, grammar, edge):
if self._trace > 0:
- print 'Predictor', chart.pp_edge(e,w)
+ print('Predictor', chart.pp_edge(e,w))
if edge.is_incomplete():
for e in scanner.apply(chart, grammar, edge):
if self._trace > 0:
- print 'Scanner ', chart.pp_edge(e,w)
+ print('Scanner ', chart.pp_edge(e,w))
if edge.is_complete():
for e in completer.apply(chart, grammar, edge):
if self._trace > 0:
- print 'Completer', chart.pp_edge(e,w)
+ print('Completer', chart.pp_edge(e,w))
# Output a list of complete parses.
return chart.parses(grammar.start(), tree_class=tree_class)
@@ -1363,7 +1363,7 @@
# Width, for printing trace edges.
w = 50/(chart.num_leaves()+1)
- if self._trace > 0: print chart.pp_leaves(w)
+ if self._trace > 0: print(chart.pp_leaves(w))
edges_added = 1
while edges_added > 0:
@@ -1372,11 +1372,11 @@
edges_added_by_rule = 0
for e in rule.apply_everywhere(chart, grammar):
if self._trace > 0 and edges_added_by_rule == 0:
- print '%s:' % rule
+ print('%s:' % rule)
edges_added_by_rule += 1
- if self._trace > 1: print chart.pp_edge(e,w)
+ if self._trace > 1: print(chart.pp_edge(e,w))
if self._trace == 1 and edges_added_by_rule > 0:
- print ' - Added %d edges' % edges_added_by_rule
+ print(' - Added %d edges' % edges_added_by_rule)
edges_added += edges_added_by_rule
# Return a list of complete parses.
@@ -1438,14 +1438,14 @@
added with the current strategy and grammar.
"""
if self._chart is None:
- raise ValueError, 'Parser must be initialized first'
+ raise ValueError('Parser must be initialized first')
while 1:
self._restart = False
w = 50/(self._chart.num_leaves()+1)
for e in self._parse():
- if self._trace > 1: print self._current_chartrule
- if self._trace > 0: print self._chart.pp_edge(e,w)
+ if self._trace > 1: print(self._current_chartrule)
+ if self._trace > 0: print(self._chart.pp_edge(e,w))
yield e
if self._restart: break
else:
@@ -1579,23 +1579,23 @@
# Tokenize a sample sentence.
sent = 'I saw John with a dog with my cookie'
- print "SentenRefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/chart.py
ce:\n", sent
+ print("Sentence:\n", sent)
from nltk import tokenize
tokens = list(tokenize.whitespace(sent))
- print tokens
+ print(tokens)
# Ask the user which parser to test
- print ' 1: Top-down chart parser'
- print ' 2: Bottom-up chart parser'
- print ' 3: Earley parser'
- print ' 4: Stepping chart parser (alternating top-down & bottom-up)'
- print ' 5: All parsers'
- print '\nWhich parser (1-5)? ',
+ print(' 1: Top-down chart parser')
+ print(' 2: Bottom-up chart parser')
+ print(' 3: Earley parser')
+ print(' 4: Stepping chart parser (alternating top-down & bottom-up)')
+ print(' 5: All parsers')
+ print('\nWhich parser (1-5)? ', end=' ')
choice = sys.stdin.readline().strip()
- print
+ print()
if choice not in '12345':
- print 'Bad parser number'
+ print('Bad parser number')
return
# Keep track of how long each parser takes.
@@ -1608,7 +1608,7 @@
parses = cp.get_parse_list(tokens)
times['top down'] = time.time()-t
assert len(parses)==5, 'Not all parses found'
- for tree in parses: print tree
+ for tree in parses: print(tree)
# Run the bottom-up parser, if requested.
if choice in ('2', '5'):
@@ -1617,7 +1617,7 @@
parses = cp.get_parse_list(tokens)
times['bottom up'] = time.time()-t
assert len(parses)==5, 'Not all parses found'
- for tree in parses: print tree
+ for tree in parses: print(tree)
# Run the earley, if requested.
if choice in ('3', '5'):
@@ -1626,7 +1626,7 @@
parses = cp.get_parse_list(tokens)
times['Earley parser'] = time.time()-t
assert len(parses)==5, 'Not all parses found'
- for tree in parses: print tree
+ for tree in parses: print(tree)
# Run the stepping parser, if requested.
if choice in ('4', '5'):
@@ -1634,24 +1634,24 @@
cp = SteppingChartParse(grammar, trace=1)
cp.initialize(tokens)
for i in range(5):
- print '*** SWITCH TO TOP DOWN'
+ print('*** SWITCH TO TOP DOWN')
cp.set_strategy(TD_STRATEGY)
for j, e in enumerate(cp.step()):
if j>20 or e is None: break
- print '*** SWITCH TO BOTTOM UP'
+ print('*** SWITCH TO BOTTOM UP')
cp.set_strategy(BU_STRATEGY)
for j, e in enumerate(cp.step()):
if j>20 or e is None: break
times['stepping'] = time.time()-t
assert len(cp.parses())==5, 'Not all parses found'
- for parse in cp.parses(): print parse
+ for parse in cp.parses(): print(parse)
# Print the times of all parsers:
- maxlen = max(len(key) for key in times.keys())
- format = '%' + `maxlen` + 's parser: %6.3fsec'
- times_items = times.items()
+ maxlen = max(len(key) for key in list(times.keys()))
+ format = '%' + repr(maxlen) + 's parser: %6.3fsec'
+ times_items = list(times.items())
times_items.sort(lambda a,b:cmp(a[1], b[1]))
for (parser, t) in times_items:
- print format % (parser, t)
+ print(format % (parser, t))
if __name__ == '__main__': demo()
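Not everything in chart.py is actually converted: the refactored demo still ends with times_items.sort(lambda a,b:cmp(a[1], b[1])), and 2to3 ships no fixer for comparison functions, so under Python 3 that line fails twice over: list.sort() lost its positional cmp argument, and the cmp() builtin is gone. (The unconverted w = 50/(chart.num_leaves()+1) is similarly suspect, since / now yields a float.) The usual manual fix is a key function; a hedged sketch with made-up timings:

    times_items = [("top down", 0.412), ("Earley parser", 0.095)]
    # was: times_items.sort(lambda a, b: cmp(a[1], b[1]))
    times_items.sort(key=lambda item: item[1])   # order parsers by elapsed time
    for parser, t in times_items:
        print('%20s parser: %6.3fsec' % (parser, t))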
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/cfg.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/cfg.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/cfg.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/cfg.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/cfg.py (refactored)
@@ -226,8 +226,8 @@
@param rhs: The right-hand side of the new C{Production}.
@type rhs: sequence of (C{Nonterminal} and (terminal))
"""
- if isinstance(rhs, (str, unicode)):
- raise TypeError, 'production right hand side should be a list, not a string'
+ if isinstance(rhs, str):
+ raise TypeError('production right hand side should be a list, not a string')
self._lhs = lhs
self._rhs = tuple(rhs)
self._hash = hash((self._lhs, self._rhs))
@@ -385,7 +385,7 @@
"""
# Use _PARSE_RE to check that it's valid.
if not _PARSE_RE.match(s):
- raise ValueError, 'Bad production string'
+ raise ValueError('Bad production string')
# Use _SPLIT_RE to process it.
pieces = _SPLIT_RE.split(s)
pieces = [p for i,p in enumerate(pieces) if i%2==1]
@@ -407,9 +407,9 @@
if line.startswith('#') or line=='': continue
try: productions += parse_production(line)
except ValueError:
- raise ValueError, 'Unable to parse line %s' % linenum
+ raise ValueError('Unable to parse line %s' % linenum)
if len(productions) == 0:
- raise ValueError, 'No productions found!'
+ raise ValueError('No productions found!')
start = productions[0].lhs()
return Grammar(start, productions)
@@ -429,11 +429,11 @@
N, V, P, Det = cfg.nonterminals('N, V, P, Det')
VP_slash_NP = VP/NP
- print 'Some nonterminals:', [S, NP, VP, PP, N, V, P, Det, VP/NP]
- print ' S.symbol() =>', `S.symbol()`
- print
-
- print cfg.Production(S, [NP])
+ print('Some nonterminals:', [S, NP, VP, PP, N, V, P, Det, VP/NP])
+ print(' S.symbol() =>', repr(S.symbol()))
+ print()
+
+ print(cfg.Production(S, [NP]))
# Create some Grammar Productions
grammar = cfg.parse_grammar("""
@@ -453,11 +453,11 @@
P -> 'in'
""")
- print 'A Grammar:', `grammar`
- print ' grammar.start() =>', `grammar.start()`
- print ' grammar.productions() =>',
+ print('A Grammar:', repr(grammar))
+ print(' grammar.start() =>', repr(grammar.start()))
+ print(' grammar.productions() =>', end=' ')
# Use string.replace(...) is to line-wrap the output.
- print `grammar.productions()`.replace(',', ',\n'+' '*25)
- print
+ print(repr(grammar.productions()).replace(',', ',\n'+' '*25))
+ print()
if __name__ == '__main__': demo()
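cfg.py adds two smaller fixers: the unicode builtin no longer exists, so the isinstance check collapses to plain str, and backtick repr syntax becomes a repr() call. In miniature:

    value = "NP -> Det N"
    print(isinstance(value, str))    # was: isinstance(value, (str, unicode))
    print(repr(value))               # was: print `value`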
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/category.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/category.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/category.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/category.py (refactored)
@@ -10,11 +10,11 @@
#
# $Id: category.py 4162 2007-03-01 00:46:05Z stevenbird $
-import logic
+from . import logic
from nltk.cfg import *
#from kimmo import kimmo
-from featurelite import *
+from .featurelite import *
from copy import deepcopy
import yaml
# import nltk.yamltags
@@ -123,16 +123,16 @@
self._features[key] = value
def items(self):
- return self._features.items()
+ return list(self._features.items())
def keys(self):
- return self._features.keys()
+ return list(self._features.keys())
def values(self):
- return self._features.values()
+ return list(self._features.values())
def has_key(self, key):
- return self._features.has_key(key)
+ return key in self._features
def symbol(self):
"""
@@ -161,7 +161,7 @@
"""
@return: a list of all features that have values.
"""
- return self._features.keys()
+ return list(self._features.keys())
has_feature = has_key
@@ -179,7 +179,7 @@
@staticmethod
def _remove_unbound_vars(obj):
- for (key, value) in obj.items():
+ for (key, value) in list(obj.items()):
if isinstance(value, Variable):
del obj[key]
elif isinstance(value, (Category, dict)):
@@ -206,7 +206,7 @@
def _str(cls, obj, reentrances, reentrance_ids, normalize=False):
segments = []
- keys = obj.keys()
+ keys = list(obj.keys())
keys.sort()
for fname in keys:
if fname == cls.headname: continue
@@ -389,14 +389,14 @@
# Semantic value of the form '; return an ApplicationExpression
match = _PARSE_RE['application'].match(s, position)
if match is not None:
- fun = ParserSubstitute(match.group(2)).next()
- arg = ParserSubstitute(match.group(3)).next()
+ fun = next(ParserSubstitute(match.group(2)))
+ arg = next(ParserSubstitute(match.group(3)))
return logic.ApplicationExpressionSubst(fun, arg), match.end()
# other semantic value enclosed by '< >'; return value given by the lambda expr parser
match = _PARSE_RE['semantics'].match(s, position)
if match is not None:
- return ParserSubstitute(match.group(1)).next(), match.end()
+ return next(ParserSubstitute(match.group(1))), match.end()
# String value
if s[position] in "'\"":
@@ -455,11 +455,11 @@
try:
lhs, position = cls.inner_parse(s, position)
lhs = cls(lhs)
- except ValueError, e:
+ except ValueError as e:
estr = ('Error parsing field structure\n\n\t' +
s + '\n\t' + ' '*e.args[1] + '^ ' +
'Expected %s\n' % e.args[0])
- raise ValueError, estr
+ raise ValueError(estr)
lhs.freeze()
match = _PARSE_RE['arrow'].match(s, position)
@@ -473,11 +473,11 @@
try:
val, position = cls.inner_parse(s, position, {})
if isinstance(val, dict): val = cls(val)
- except ValueError, e:
+ except ValueError as e:
estr = ('Error parsing field structure\n\n\t' +
s + '\n\t' + ' '*e.args[1] + '^ ' +
'Expected %s\n' % e.args[0])
- raise ValueError, estr
+ raise ValueError(estr)
if isinstance(val, Category): val.freeze()
rhs.append(val)
position = _PARSE_RE['whitespace'].match(s, position).end()
@@ -519,7 +519,7 @@
def _str(cls, obj, reentrances, reentrance_ids, normalize=False):
segments = []
- keys = obj.keys()
+ keys = list(obj.keys())
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/category.py
RefactoringTool: Warnings/messages while refactoring:
RefactoringTool: ### In file ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/category.py ###
RefactoringTool: Line 122: could not convert: raise "Cannot modify a frozen Category"
RefactoringTool: Python 3 does not support string exceptions
keys.sort()
for fname in keys:
if fname == cls.headname: continue
@@ -576,9 +576,9 @@
if slash_match is not None:
position = slash_match.end()
slash, position = GrammarCategory._parseval(s, position, reentrances)
- if isinstance(slash, basestring): slash = {'pos': slash}
+ if isinstance(slash, str): slash = {'pos': slash}
body['/'] = unify(body.get('/'), slash)
- elif not body.has_key('/'):
+ elif '/' not in body:
body['/'] = False
return cls(body), position
@@ -632,7 +632,7 @@
return lookup
def earley_parser(self, trace=1):
- from featurechart import FeatureEarleyChartParse
+ from .featurechart import FeatureEarleyChartParse
if self.kimmo is None: lexicon = self.earley_lexicon()
else: lexicon = self.kimmo_lexicon()
@@ -686,28 +686,28 @@
yaml.add_representer(GrammarCategory, GrammarCategory.to_yaml)
def demo():
- print "Category(pos='n', agr=dict(number='pl', gender='f')):"
- print
- print Category(pos='n', agr=dict(number='pl', gender='f'))
- print repr(Category(pos='n', agr=dict(number='pl', gender='f')))
- print
- print "GrammarCategory.parse('NP[sem=/NP'):"
- print
- print GrammarCategory.parse(r'NP[sem=]/NP')
- print repr(GrammarCategory.parse(r'NP[sem=]/NP'))
- print
- print "GrammarCategory.parse('?x/?x'):"
- print
- print GrammarCategory.parse('?x/?x')
- print repr(GrammarCategory.parse('?x/?x'))
- print
- print "GrammarCategory.parse('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]'):"
- print
- print GrammarCategory.parse('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]')
- print repr(GrammarCategory.parse('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]'))
- print
+ print("Category(pos='n', agr=dict(number='pl', gender='f')):")
+ print()
+ print(Category(pos='n', agr=dict(number='pl', gender='f')))
+ print(repr(Category(pos='n', agr=dict(number='pl', gender='f'))))
+ print()
+ print("GrammarCategory.parse('NP[sem=/NP'):")
+ print()
+ print(GrammarCategory.parse(r'NP[sem=]/NP'))
+ print(repr(GrammarCategory.parse(r'NP[sem=]/NP')))
+ print()
+ print("GrammarCategory.parse('?x/?x'):")
+ print()
+ print(GrammarCategory.parse('?x/?x'))
+ print(repr(GrammarCategory.parse('?x/?x')))
+ print()
+ print("GrammarCategory.parse('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]'):")
+ print()
+ print(GrammarCategory.parse('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]'))
+ print(repr(GrammarCategory.parse('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]')))
+ print()
g = GrammarFile.read_file("speer.cfg")
- print g.grammar()
+ print(g.grammar())
if __name__ == '__main__':
demo()
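The one construct 2to3 refuses outright is the string exception flagged above: raise "Cannot modify a frozen Category" at line 122 of category.py. String exceptions were removed in Python 2.6, and in Python 3 everything raised must derive from BaseException, so this line needs a manual fix. A hedged sketch (the function is illustrative, not the source's):

    def set_feature(frozen, key, value):
        if frozen:
            # was: raise "Cannot modify a frozen Category"
            raise TypeError("Cannot modify a frozen Category")
        print("set %s=%s" % (key, value))

    set_feature(False, "pos", "n")   # ok; with frozen=True it raises TypeError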
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/batchtest.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/batchtest.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/batchtest.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/batchtest.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/batchtest.py (refactored)
@@ -1,5 +1,5 @@
-from featurechart import *
-from treeview import *
+from .featurechart import *
+from .treeview import *
def demo():
cp = load_earley('gazdar6.cfg', trace=2)
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/__init__.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/__init__.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/__init__.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/semantics/__init__.py (refactored)
@@ -131,7 +131,7 @@
"""
# Make sure we're not directly instantiated:
if self.__class__ == AbstractParse:
- raise AssertionError, "Abstract classes can't be instantiated"
+ raise AssertionError("Abstract classes can't be instantiated")
def parse(self, sentence):
return self.get_parse_list(sentence.split())
@@ -155,9 +155,9 @@
line = line.strip()
if not line: continue
if line.startswith('#'):
- print line
+ print(line)
continue
- print "Sentence:", line
+ print("Sentence:", line)
parses = self.parse(line)
- print "%d parses." % len(parses)
- for tree in parses: print tree
+ print("%d parses." % len(parses))
+ for tree in parses: print(tree)
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/treeview.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/treeview.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/treeview.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/treeview.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/treeview.py (refactored)
@@ -1,4 +1,4 @@
-import Tkinter
+import tkinter
from nltk.draw import TreeWidget
from nltk.draw import CanvasFrame
@@ -7,32 +7,32 @@
class TreeView:
def __init__(self, trees, root=None):
if len(trees) == 0:
- print "No trees to display."
+ print("No trees to display.")
return
newroot = False
if root is None:
- root = Tkinter.Tk()
+ root = tkinter.Tk()
window = root
newroot = True
else:
- window = Tkinter.Toplevel(root)
+ window = tkinter.Toplevel(root)
window.title("Parse Tree")
window.geometry("600x400")
self.cf = CanvasFrame(window)
self.cf.pack(side='top', expand=1, fill='both')
- buttons = Tkinter.Frame(window)
+ buttons = tkinter.Frame(window)
buttons.pack(side='bottom', fill='x')
- self.spin = Tkinter.Spinbox(buttons, from_=1, to=len(trees),
+ self.spin = tkinter.Spinbox(buttons, from_=1, to=len(trees),
command=self.showtree, width=3)
if len(trees) > 1: self.spin.pack(side='left')
- self.label = Tkinter.Label(buttons, text="of %d" % len(trees))
+ self.label = tkinter.Label(buttons, text="of %d" % len(trees))
if len(trees) > 1: self.label.pack(side='left')
- self.done = Tkinter.Button(buttons, text="Done", command=window.destroy)
+ self.done = tkinter.Button(buttons, text="Done", command=window.destroy)
self.done.pack(side='right')
- self.printps = Tkinter.Button(buttons, text="Print to Postscript", command=self.cf.print_to_file)
+ self.printps = tkinter.Button(buttons, text="Print to Postscript", command=self.cf.print_to_file)
self.printps.pack(side='right')
self.trees = trees
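parse/treeview.py is mostly the tkinter fixer, the same rename already applied to the semantics copy at the top of this section: the Python 2 module Tkinter becomes the Python 3 tkinter package. The 2to3 output targets Python 3 only; a guarded import is the usual way to keep a module running on both, sketched here:

    try:
        import tkinter                  # Python 3 name, as 2to3 writes it
    except ImportError:
        import Tkinter as tkinter       # Python 2 fallback (not in the log's output)

    print(tkinter.TkVersion)            # inspect the module without opening a window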
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/tree.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No files need to be modified.
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/test.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/test.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/test.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/test.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/test.py (refactored)
@@ -1,10 +1,10 @@
-from featurechart import *
-from treeview import *
+from .featurechart import *
+from .treeview import *
def demo():
cp = load_earley('gazdar6.cfg', trace=2)
trees = cp.parse('the man who chased Fido returned')
- for tree in trees: print tree
+ for tree in trees: print(tree)
#run_profile()
if __name__ == '__main__': demo()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/featurelite.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/featurelite.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/featurelite.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/featurelite.py (refactored)
@@ -84,7 +84,7 @@
class FeatureI(object):
def __init__(self):
- raise TypeError, "FeatureI is an abstract interface"
+ raise TypeError("FeatureI is an abstract interface")
class _FORWARD(object):
"""
@@ -95,7 +95,7 @@
instantiated.
"""
def __init__(self):
- raise TypeError, "The _FORWARD class is not meant to be instantiated"
+ raise TypeError("The _FORWARD class is not meant to be instantiated")
class Variable(object):
"""
@@ -241,7 +241,7 @@
def variable_representer(dumper, var):
"Output variables in YAML as ?name."
- return dumper.represent_scalar(u'!var', u'?%s' % var.name())
+ return dumper.represent_scalar('!var', '?%s' % var.name())
yaml.add_representer(Variable, variable_representer)
def variable_constructor(loader, node):
@@ -249,8 +249,8 @@
value = loader.construct_scalar(node)
name = value[1:]
return Variable(name)
-yaml.add_constructor(u'!var', variable_constructor)
-yaml.add_implicit_resolver(u'!var', re.compile(r'^\?\w+$'))
+yaml.add_constructor('!var', variable_constructor)
+yaml.add_implicit_resolver('!var', re.compile(r'^\?\w+$'))
def _copy_and_bind(feature, bindings, memo=None):
"""
@@ -262,14 +262,14 @@
if memo is None: memo = {}
if id(feature) in memo: return memo[id(feature)]
if isinstance(feature, Variable) and bindings is not None:
- if not bindings.has_key(feature.name()):
+ if feature.name() not in bindings:
bindings[feature.name()] = feature.copy()
result = _copy_and_bind(bindings[feature.name()], None, memo)
else:
if isMapping(feature):
# Construct a new object of the same class
result = feature.__class__()
- for (key, value) in feature.items():
+ for (key, value) in list(feature.items()):
result[key] = _copy_and_bind(value, bindings, memo)
else: result = feature
memo[id(feature)] = result
@@ -579,19 +579,19 @@
if memo is None: memo = {}
copymemo = {}
- if memo.has_key((id(feature1), id(feature2))):
+ if (id(feature1), id(feature2)) in memo:
result = memo[id(feature1), id(feature2)]
if result is UnificationFailure:
if trace > 2:
- print '(cached) Unifying: %r + %r --> [fail]' % (feature1, feature2)
+ print('(cached) Unifying: %r + %r --> [fail]' % (feature1, feature2))
raise result()
if trace > 2:
- print '(cached) Unifying: %r + %r --> ' % (feature1, feature2),
- print repr(result)
+ print('(cached) Unifying: %r + %r --> ' % (feature1, feature2), end=' ')
+ print(repr(result))
return result
if trace > 1:
- print 'Unifying: %r + %r --> ' % (feature1, feature2),
+ print('Unifying: %r + %r --> ' % (feature1, feature2), end=' ')
# Make copies of the two structures (since the unification algorithm is
# destructive). Use the same memo, to preserve reentrance links between
@@ -600,7 +600,7 @@
copy2 = _copy_and_bind(feature2, bindings2, copymemo)
# Preserve links between bound variables and the two feature structures.
for b in (bindings1, bindings2):
- for (vname, value) in b.items():
+ for (vname, value) in list(b.items()):
value_id = id(value)
if value_id in copymemo:
b[vname] = copymemo[value_id]
@@ -610,7 +610,7 @@
unified = _destructively_unify(copy1, copy2, bindings1, bindings2, memo,
fail)
except UnificationFailure:
- if trace > 1: print '[fail]'
+ if trace > 1: print('[fail]')
memo[id(feature1), id(feature2)] = UnificationFailure
raise
@@ -622,9 +622,9 @@
_lookup_values(bindings2, {}, remove=True)
if trace > 1:
- print repr(unified)
+ print(repr(unified))
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/featurelite.py
elif trace > 0:
- print 'Unifying: %r + %r --> %r' % (feature1, feature2, repr(unified))
+ print('Unifying: %r + %r --> %r' % (feature1, feature2, repr(unified)))
memo[id(feature1), id(feature2)] = unified
return unified
@@ -640,11 +640,11 @@
and C{other} are undefined.
"""
if depth > 50:
- print "Infinite recursion in this unification:"
- print show(dict(feature1=feature1, feature2=feature2,
- bindings1=bindings1, bindings2=bindings2, memo=memo))
- raise ValueError, "Infinite recursion in unification"
- if memo.has_key((id(feature1), id(feature2))):
+ print("Infinite recursion in this unification:")
+ print(show(dict(feature1=feature1, feature2=feature2,
+ bindings1=bindings1, bindings2=bindings2, memo=memo)))
+ raise ValueError("Infinite recursion in unification")
+ if (id(feature1), id(feature2)) in memo:
result = memo[id(feature1), id(feature2)]
if result is UnificationFailure: raise result()
unified = _do_unify(feature1, feature2, bindings1, bindings2, memo, fail,
@@ -687,9 +687,9 @@
# At this point, we know they're both mappings.
# Do the destructive part of unification.
- while feature2.has_key(_FORWARD): feature2 = feature2[_FORWARD]
+ while _FORWARD in feature2: feature2 = feature2[_FORWARD]
if feature1 is not feature2: feature2[_FORWARD] = feature1
- for (fname, val2) in feature2.items():
+ for (fname, val2) in list(feature2.items()):
if fname == _FORWARD: continue
val1 = feature1.get(fname)
feature1[fname] = _destructively_unify(val1, val2, bindings1,
@@ -702,12 +702,12 @@
the target of its forward pointer (to preserve reentrance).
"""
if not isMapping(feature): return
- if visited.has_key(id(feature)): return
+ if id(feature) in visited: return
visited[id(feature)] = True
- for fname, fval in feature.items():
+ for fname, fval in list(feature.items()):
if isMapping(fval):
- while fval.has_key(_FORWARD):
+ while _FORWARD in fval:
fval = fval[_FORWARD]
feature[fname] = fval
_apply_forwards(fval, visited)
@@ -739,10 +739,10 @@
else:
return var.forwarded_self()
if not isMapping(mapping): return mapping
- if visited.has_key(id(mapping)): return mapping
+ if id(mapping) in visited: return mapping
visited[id(mapping)] = True
- for fname, fval in mapping.items():
+ for fname, fval in list(mapping.items()):
if isMapping(fval):
_lookup_values(fval, visited)
elif isinstance(fval, Variable):
@@ -763,9 +763,9 @@
Replace any feature structures that have been forwarded by their new
identities.
"""
- for (key, value) in bindings.items():
- if isMapping(value) and value.has_key(_FORWARD):
- while value.has_key(_FORWARD):
+ for (key, value) in list(bindings.items()):
+ if isMapping(value) and _FORWARD in value:
+ while _FORWARD in value:
value = value[_FORWARD]
bindings[key] = value
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/featurechart.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/featurechart.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/featurechart.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/featurechart.py (refactored)
@@ -18,7 +18,7 @@
#from category import *
from nltk import cfg
-from featurelite import *
+from .featurelite import *
def load_earley(filename, trace=1):
"""
@@ -112,7 +112,7 @@
@return: the value of the left-hand side with variables set.
@rtype: C{Category}
"""
- return apply(TreeEdge.lhs(self), self._vars)
+ return TreeEdge.lhs(self)(*self._vars)
def orig_lhs(self):
"""
@@ -126,7 +126,7 @@
@return: the value of the right-hand side with variables set.
@rtype: C{Category}
"""
- return tuple(apply(x, self._vars) for x in TreeEdge.rhs(self))
+ return tuple(x(*self._vars) for x in TreeEdge.rhs(self))
def orig_rhs(self):
"""
@@ -161,7 +161,7 @@
left_bindings = left_edge.vars().copy()
right_bindings = right_edge.vars().copy()
try:
- unified = unify(left_edge.next(), right_edge.lhs(), left_bindings,
+ unified = unify(next(left_edge), right_edge.lhs(), left_bindings,
right_bindings, memo=self.unify_memo, trace=self.trace-2)
if isinstance(unified, Category): unified.freeze()
except UnificationFailure: return
@@ -211,7 +211,7 @@
for prod in grammar.productions():
bindings = edge.vars().copy()
try:
- unified = unify(edge.next(), prod.lhs(), bindings, {},
+ unified = unify(next(edge), prod.lhs(), bindings, {},
memo=self.unify_memo, trace=self.trace-2)
if isinstance(unified, Category): unified.freeze()
except UnificationFailure:
@@ -256,7 +256,7 @@
# Width, for printing trace edges.
#w = 40/(chart.num_leaves()+1)
w = 2
- if self._trace > 0: print ' '*9, chart.pp_leaves(w)
+ if self._trace > 0: print(' '*9, chart.pp_leaves(w))
# Initialize the chart with a special "starter" edge.
root = GrammarCategory(pos='[INIT]')
@@ -270,7 +270,7 @@
#scanner = FeatureScannerRule(self._lexicon)
for end in range(chart.num_leaves()+1):
- if self._trace > 1: print 'Processing queue %d' % end
+ if self._trace > 1: print('Processing queue %d' % end)
# Scanner rule substitute, i.e. this is being used in place
# of a proper FeatureScannerRule at the moment.
@@ -283,14 +283,14 @@
{})
chart.insert(new_pos_edge, (new_leaf_edge,))
if self._trace > 0:
- print 'Scanner ', chart.pp_edge(new_pos_edge,w)
+ print('Scanner ', chart.pp_edge(new_pos_edge,w))
for edge in chart.select(end=end):
if edge.is_incomplete():
for e in predictor.apply(chart, grammar, edge):
if self._trace > 1:
- print 'Predictor', chart.pp_edge(e,w)
+ print('Predictor', chart.pp_edge(e,w))
#if edge.is_incomplete():
# for e in scanner.apply(chart, grammar, edge):
# if self._trace > 0:
@@ -298,7 +298,7 @@
if edge.is_complete():
for e in completer.apply(chart, grammar, edge):
if self._trace > 0:
- print 'Completer', chart.pp_edge(e,w)
+ print('Completer', chart.pp_edge(e,w))
# Output a list of complete parses.
return chart.parses(root)
@@ -346,14 +346,14 @@
return earley_lexicon.get(word.upper(), [])
sent = 'I saw John with a dog with my cookie'
- print "Sentence:\n", sent
+ print("Sentence:\n", sent)
from nltk import tokenize
tokens = list(tokenize.whitespace(sent))
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/featurechart.py
t = time.time()
cp = FeatureEarleyChartParse(earley_grammar, lexicon, trace=1)
trees = cp.get_parse_list(tokens)
- print "Time: %s" % (time.time() - t)
- for tree in trees: print tree
+ print("Time: %s" % (time.time() - t))
+ for tree in trees: print(tree)
def run_profile():
import profile
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/chart.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/chart.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/chart.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/chart.py (refactored)
@@ -9,8 +9,8 @@
#
# $Id: chart.py 4157 2007-02-28 09:56:25Z stevenbird $
-from __init__ import *
-from tree import Tree
+from .__init__ import *
+from .tree import Tree
from nltk import cfg
"""
@@ -163,7 +163,7 @@
"""
raise AssertionError('EdgeI is an abstract interface')
- def next(self):
+ def __next__(self):
"""
@return: The element of this edge's right-hand side that
immediately follows its dot.
@@ -272,7 +272,7 @@
def dot(self): return self._dot
def is_complete(self): return self._dot == len(self._rhs)
def is_incomplete(self): return self._dot != len(self._rhs)
- def next(self):
+ def __next__(self):
if self._dot >= len(self._rhs): return None
else: return self._rhs[self._dot]
@@ -335,7 +335,7 @@
def dot(self): return 0
def is_complete(self): return True
def is_incomplete(self): return False
- def next(self): return None
+ def __next__(self): return None
# Comparisons & hashing
def __cmp__(self, other):
@@ -488,12 +488,12 @@
if restrictions=={}: return iter(self._edges)
# Find the index corresponding to the given restrictions.
- restr_keys = restrictions.keys()
+ restr_keys = list(restrictions.keys())
restr_keys.sort()
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
- if not self._indexes.has_key(restr_keys):
+ if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = [restrictions[k] for k in restr_keys]
@@ -507,7 +507,7 @@
# Make sure it's a valid index.
for k in restr_keys:
if not hasattr(EdgeI, k):
- raise ValueError, 'Bad restriction: %s' % k
+ raise ValueError('Bad restriction: %s' % k)
# Create the index.
self._indexes[restr_keys] = {}
@@ -539,12 +539,12 @@
C{child_pointer_list} with C{edge}.
"""
# Is it a new edge?
- if not self._edge_to_cpls.has_key(edge):
+ if edge not in self._edge_to_cpls:
# Add it to the list of edges.
self._edges.append(edge)
# Register with indexes
- for (restr_keys, index) in self._indexes.items():
+ for (restr_keys, index) in list(self._indexes.items()):
vals = [getattr(edge, k)() for k in restr_keys]
index = self._indexes[restr_keys]
index.setdefault(tuple(vals),[]).append(edge)
@@ -553,7 +553,7 @@
cpls = self._edge_to_cpls.setdefault(edge,{})
child_pointer_list = tuple(child_pointer_list)
- if cpls.has_key(child_pointer_list):
+ if child_pointer_list in cpls:
# We've already got this CPL; return false.
return False
else:
@@ -600,7 +600,7 @@
than once, we can reuse the same trees.
"""
# If we've seen this edge before, then reuse our old answer.
- if memo.has_key(edge): return memo[edge]
+ if edge in memo: return memo[edge]
trees = []
@@ -676,7 +676,7 @@
been used to form this edge.
"""
# Make a copy, in case they modify it.
- return self._edge_to_cpls.get(edge, {}).keys()
+ return list(self._edge_to_cpls.get(edge, {}).keys())
#////////////////////////////////////////////////////////////
# Display
@@ -838,7 +838,7 @@
@rtype: C{list} of L{EdgeI}
@return: A list of the edges that were added.
"""
- raise AssertionError, 'ChartRuleI is an abstract interface'
+ raise AssertionError('ChartRuleI is an abstract interface')
def apply_iter(self, chart, grammar, *edges):
"""
@@ -853,7 +853,7 @@
that should be passed to C{apply} is specified by the
L{NUM_EDGES} class variable.
"""
- raise AssertionError, 'ChartRuleI is an abstract interface'
+ raise AssertionError('ChartRuleI is an abstract interface')
def apply_everywhere(self, chart, grammar):
"""
@@ -863,7 +863,7 @@
@rtype: C{list} of L{EdgeI}
@return: A list of the edges that were added.
"""
- raise AssertionError, 'ChartRuleI is an abstract interface'
+ raise AssertionError('ChartRuleI is an abstract interface')
def apply_everywhere_iter(self, chart, grammar):
"""
@@ -874,7 +874,7 @@
return.
@rtype: C{iter} of L{EdgeI}
"""
- raise AssertionError, 'ChartRuleI is an abstract interface'
+ raise AssertionError('ChartRuleI is an abstract interface')
class AbstractChartRule(object):
"""
@@ -892,7 +892,7 @@
# Subclasses must define apply_iter.
def apply_iter(self, chart, grammar, *edges):
- raise AssertionError, 'AbstractChartRule is an abstract class'
+ raise AssertionError('AbstractChartRule is an abstract class')
# Default: loop through the given number of edges, and call
# self.apply() for each set of edges.
@@ -920,7 +920,7 @@
yield new_edge
else:
- raise AssertionError, 'NUM_EDGES>3 is not currently supported'
+ raise AssertionError('NUM_EDGES>3 is not currently supported')
# Default: delegate to apply_iter.
def apply(self, chart, grammar, *edges):
@@ -952,7 +952,7 @@
def apply_iter(self, chart, grammar, left_edge, right_edge):
# Make sure the rule is applicable.
if not (left_edge.end() == right_edge.start() and
- left_edge.next() == right_edge.lhs() and
+ next(left_edge) == right_edge.lhs() and
left_edge.is_incomplete() and right_edge.is_complete()):
return
@@ -992,7 +992,7 @@
if edge1.is_incomplete():
# edge1 = left_edge; edge2 = right_edge
for edge2 in chart.select(start=edge1.end(), is_complete=True,
- lhs=edge1.next()):
+ lhs=next(edge1)):
for new_edge in fr.apply_iter(chart, grammar, edge1, edge2):
yield new_edge
else:
@@ -1051,7 +1051,7 @@
NUM_EDGES = 1
def apply_iter(self, chart, grammar, edge):
if edge.is_complete(): return
- for prod in grammar.productions(lhs=edge.next()):
+ for prod in grammar.productions(lhs=next(edge)):
new_edge = TreeEdge.from_production(prod, edge.end())
if chart.insert(new_edge, ()):
yield new_edge
@@ -1070,7 +1070,7 @@
if edge.is_complete() or edge.end() >= chart.num_leaves(): return
index = edge.end()
leaf = chart.leaf(index)
- if edge.next() == leaf:
+ if next(edge) == leaf:
new_edge = LeafEdge(leaf, index)
if chart.insert(new_edge, ()):
yield new_edge
@@ -1118,7 +1118,7 @@
# If we've already applied this rule to an edge with the same
# next & end, and the chart & grammar have not changed, then
# just return (no new edges to add).
- done = self._done.get((edge.next(), edge.end()), (None,None))
+ done = self._done.get((next(edge), edge.end()), (None,None))
if done[0] is chart and done[1] is grammar: return
# Add all the edges indicated by the top down expand rule.
@@ -1126,7 +1126,7 @@
yield e
# Record the fact that we've applied this rule.
- self._done[edge.next(), edge.end()] = (chart, grammar)
+ self._done[next(edge), edge.end()] = (chart, grammar)
def __str__(self): return 'Top Down Expand Rule'
@@ -1218,11 +1218,11 @@
if edge.is_complete() or edge.end()>=chart.num_leaves(): return
index = edge.end()
leaf = chart.leaf(index)
- if edge.next() in self._word_to_pos.get(leaf, []):
+ if next(edge) in self._word_to_pos.get(leaf, []):
new_leaf_edge = LeafEdge(leaf, index)
if chart.insert(new_leaf_edge, ()):
yield new_leaf_edge
- new_pos_edge = TreeEdge((index,index+1), edge.next(),
+ new_pos_edge = TreeEdge((index,index+1), next(edge),
[leaf], 1)
if chart.insert(new_pos_edge, (new_leaf_edge,)):
yield new_pos_edge
@@ -1283,7 +1283,7 @@
# Width, for printing trace edges.
w = 50/(chart.num_leaves()+1)
- if self._trace > 0: print ' ', chart.pp_leaves(w)
+ if self._trace > 0: print(' ', chart.pp_leaves(w))
# Initialize the chart with a special "starter" edge.
root = cfg.Nonterminal('[INIT]')
@@ -1296,20 +1296,20 @@
scanner = ScannerRule(self._lexicon)
for end in range(chart.num_leaves()+1):
- if self._trace > 1: print 'Processing queue %d' % end
+ if self._trace > 1: print('Processing queue %d' % end)
for edge in chart.select(end=end):
if edge.is_incomplete():
for e in predictor.apply(chart, grammar, edge):
if self._trace > 0:
- print 'Predictor', chart.pp_edge(e,w)
+ print('Predictor', chart.pp_edge(e,w))
if edge.is_incomplete():
for e in scanner.apply(chart, grammar, edge):
if self._trace > 0:
- print 'Scanner ', chart.pp_edge(e,w)
+ print('Scanner ', chart.pp_edge(e,w))
if edge.is_complete():
for e in completer.apply(chart, grammar, edge):
if self._trace > 0:
- print 'Completer', chart.pp_edge(e,w)
+ print('Completer', chart.pp_edge(e,w))
# Output a list of complete parses.
return chart.parses(grammar.start(), tree_class=tree_class)
@@ -1362,7 +1362,7 @@
# Width, for printing trace edges.
w = 50/(chart.num_leaves()+1)
- if self._trace > 0: print chart.pp_leaves(w)
+ if self._trace > 0: print(chart.pp_leaves(w))
edges_added = 1
while edges_added > 0:
@@ -1371,11 +1371,11 @@
edges_added_by_rule = 0
for e in rule.apply_everywhere(chart, grammar):
if self._trace > 0 and edges_added_by_rule == 0:
- print '%s:' % rule
+ print('%s:' % rule)
edges_added_by_rule += 1
- if self._trace > 1: print chart.pp_edge(e,w)
+ if self._trace > 1: print(chart.pp_edge(e,w))
if self._trace == 1 and edges_added_by_rule > 0:
- print ' - Added %d edges' % edges_added_by_rule
+ print(' - Added %d edges' % edges_added_by_rule)
edges_added += edges_added_by_rule
# Return a list of complete parses.
@@ -1437,14 +1437,14 @@
added with the current strategy and grammar.
"""
if self._chart is None:
- raise ValueError, 'Parser must be initialized first'
+ raise ValueError('Parser must be initialized first')
while 1:
self._restart = False
w = 50/(self._chart.num_leaves()+1)
for e in self._parse():
- if self._trace > 1: print self._current_chartrule
- if self._trace > 0: print self._chart.pp_edge(e,w)
+ if self._trace > 1: print(self._current_chartrule)
+ if self._trace > 0: print(self._chart.pp_edge(e,w))
yield e
if self._restart: break
else:
@@ -1578,23 +1578,23 @@
# Tokenize a sample sentence.
sent = 'I saw John with a dog with my cookie'
- print "Sentence:\n", sent
+ print("Sentence:\n", sent)
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/chart.py
from nltk import tokenize
tokens = list(tokenize.whitespace(sent))
- print tokens
+ print(tokens)
# Ask the user which parser to test
- print ' 1: Top-down chart parser'
- print ' 2: Bottom-up chart parser'
- print ' 3: Earley parser'
- print ' 4: Stepping chart parser (alternating top-down & bottom-up)'
- print ' 5: All parsers'
- print '\nWhich parser (1-5)? ',
+ print(' 1: Top-down chart parser')
+ print(' 2: Bottom-up chart parser')
+ print(' 3: Earley parser')
+ print(' 4: Stepping chart parser (alternating top-down & bottom-up)')
+ print(' 5: All parsers')
+ print('\nWhich parser (1-5)? ', end=' ')
choice = sys.stdin.readline().strip()
- print
+ print()
if choice not in '12345':
- print 'Bad parser number'
+ print('Bad parser number')
return
# Keep track of how long each parser takes.
@@ -1607,7 +1607,7 @@
parses = cp.get_parse_list(tokens)
times['top down'] = time.time()-t
assert len(parses)==5, 'Not all parses found'
- for tree in parses: print tree
+ for tree in parses: print(tree)
# Run the bottom-up parser, if requested.
if choice in ('2', '5'):
@@ -1616,7 +1616,7 @@
parses = cp.get_parse_list(tokens)
times['bottom up'] = time.time()-t
assert len(parses)==5, 'Not all parses found'
- for tree in parses: print tree
+ for tree in parses: print(tree)
# Run the earley, if requested.
if choice in ('3', '5'):
@@ -1625,7 +1625,7 @@
parses = cp.get_parse_list(tokens)
times['Earley parser'] = time.time()-t
assert len(parses)==5, 'Not all parses found'
- for tree in parses: print tree
+ for tree in parses: print(tree)
# Run the stepping parser, if requested.
if choice in ('4', '5'):
@@ -1633,24 +1633,24 @@
cp = SteppingChartParse(grammar, trace=1)
cp.initialize(tokens)
for i in range(5):
- print '*** SWITCH TO TOP DOWN'
+ print('*** SWITCH TO TOP DOWN')
cp.set_strategy(TD_STRATEGY)
for j, e in enumerate(cp.step()):
if j>20 or e is None: break
- print '*** SWITCH TO BOTTOM UP'
+ print('*** SWITCH TO BOTTOM UP')
cp.set_strategy(BU_STRATEGY)
for j, e in enumerate(cp.step()):
if j>20 or e is None: break
times['stepping'] = time.time()-t
assert len(cp.parses())==5, 'Not all parses found'
- for parse in cp.parses(): print parse
+ for parse in cp.parses(): print(parse)
# Print the times of all parsers:
- maxlen = max(len(key) for key in times.keys())
- format = '%' + `maxlen` + 's parser: %6.3fsec'
- times_items = times.items()
+ maxlen = max(len(key) for key in list(times.keys()))
+ format = '%' + repr(maxlen) + 's parser: %6.3fsec'
+ times_items = list(times.items())
times_items.sort(lambda a,b:cmp(a[1], b[1]))
for (parser, t) in times_items:
- print format % (parser, t)
+ print(format % (parser, t))
if __name__ == '__main__': demo()
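One line in the demo above survives untouched and will still break: times_items.sort(lambda a,b:cmp(a[1], b[1])). 2to3 has no fixer for comparison-function sorts, and Python 3 drops both cmp() and the cmp argument of sort(), so this needs a manual follow-up. A sketch with made-up timings:

    times_items = [('top down', 0.412), ('Earley parser', 0.097)]
    times_items.sort(key=lambda item: item[1])  # key= replaces the cmp lambda
    maxlen = max(len(name) for name, _ in times_items)
    format = '%' + repr(maxlen) + 's parser: %6.3fsec'
    for parser, t in times_items:
        print(format % (parser, t))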
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/cfg.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/cfg.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/cfg.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/cfg.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/cfg.py (refactored)
@@ -226,8 +226,8 @@
@param rhs: The right-hand side of the new C{Production}.
@type rhs: sequence of (C{Nonterminal} and (terminal))
"""
- if isinstance(rhs, (str, unicode)):
- raise TypeError, 'production right hand side should be a list, not a string'
+ if isinstance(rhs, str):
+ raise TypeError('production right hand side should be a list, not a string')
self._lhs = lhs
self._rhs = tuple(rhs)
self._hash = hash((self._lhs, self._rhs))
@@ -385,7 +385,7 @@
"""
# Use _PARSE_RE to check that it's valid.
if not _PARSE_RE.match(s):
- raise ValueError, 'Bad production string'
+ raise ValueError('Bad production string')
# Use _SPLIT_RE to process it.
pieces = _SPLIT_RE.split(s)
pieces = [p for i,p in enumerate(pieces) if i%2==1]
@@ -407,9 +407,9 @@
if line.startswith('#') or line=='': continue
try: productions += parse_production(line)
except ValueError:
- raise ValueError, 'Unable to parse line %s' % linenum
+ raise ValueError('Unable to parse line %s' % linenum)
if len(productions) == 0:
- raise ValueError, 'No productions found!'
+ raise ValueError('No productions found!')
start = productions[0].lhs()
return Grammar(start, productions)
@@ -429,11 +429,11 @@
N, V, P, Det = cfg.nonterminals('N, V, P, Det')
VP_slash_NP = VP/NP
- print 'Some nonterminals:', [S, NP, VP, PP, N, V, P, Det, VP/NP]
- print ' S.symbol() =>', `S.symbol()`
- print
-
- print cfg.Production(S, [NP])
+ print('Some nonterminals:', [S, NP, VP, PP, N, V, P, Det, VP/NP])
+ print(' S.symbol() =>', repr(S.symbol()))
+ print()
+
+ print(cfg.Production(S, [NP]))
# Create some Grammar Productions
grammar = cfg.parse_grammar("""
@@ -453,11 +453,11 @@
P -> 'in'
""")
- print 'A Grammar:', `grammar`
- print ' grammar.start() =>', `grammar.start()`
- print ' grammar.productions() =>',
+ print('A Grammar:', repr(grammar))
+ print(' grammar.start() =>', repr(grammar.start()))
+ print(' grammar.productions() =>', end=' ')
# Use string.replace(...) is to line-wrap the output.
- print `grammar.productions()`.replace(',', ',\n'+' '*25)
- print
+ print(repr(grammar.productions()).replace(',', ',\n'+' '*25))
+ print()
if __name__ == '__main__': demo()
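The other pattern running through these hunks is the raise rewrite: raise ValueError, 'msg' is a syntax error in Python 3, so the fixer converts the exception into an ordinary call. A minimal sketch of the converted form (the check itself is invented):

    def parse_production_checked(line):
        # Python 2 form was:  raise ValueError, 'Bad production string'
        if '->' not in line:
            raise ValueError('Bad production string')
        return line

    try:
        parse_production_checked('NP VP')
    except ValueError as err:
        print(err)  # Bad production string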
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/category.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/category.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/category.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/category.py (refactored)
@@ -11,10 +11,10 @@
# $Id: category.py 4162 2007-03-01 00:46:05Z stevenbird $
from nltk.semantics import logic
-from cfg import *
+from .cfg import *
from kimmo import kimmo
-from featurelite import *
+from .featurelite import *
from copy import deepcopy
import yaml
# import nltk.yamltags
@@ -130,16 +130,16 @@
self._features[key] = value
def items(self):
- return self._features.items()
+ return list(self._features.items())
def keys(self):
- return self._features.keys()
+ return list(self._features.keys())
def values(self):
- return self._features.values()
+ return list(self._features.values())
def has_key(self, key):
- return self._features.has_key(key)
+ return key in self._features
def symbol(self):
"""
@@ -168,7 +168,7 @@
"""
@return: a list of all features that have values.
"""
- return self._features.keys()
+ return list(self._features.keys())
has_feature = has_key
@@ -183,7 +183,7 @@
@staticmethod
def _remove_unbound_vars(obj):
- for (key, value) in obj.items():
+ for (key, value) in list(obj.items()):
if isinstance(value, Variable):
del obj[key]
elif isinstance(value, (Category, dict)):
@@ -210,7 +210,7 @@
def _str(cls, obj, reentrances, reentrance_ids):
segments = []
- keys = obj.keys()
+ keys = list(obj.keys())
keys.sort()
for fname in keys:
if fname == cls.headname: continue
@@ -391,14 +391,14 @@
# Semantic value of the form '; return an ApplicationExpression
match = _PARSE_RE['application'].match(s, position)
if match is not None:
- fun = ParserSubstitute(match.group(2)).next()
- arg = ParserSubstitute(match.group(3)).next()
+ fun = next(ParserSubstitute(match.group(2)))
+ arg = next(ParserSubstitute(match.group(3)))
return ApplicationExpressionSubst(fun, arg), match.end()
# other semantic value enclosed by '< >'; return value given by the lambda expr parser
match = _PARSE_RE['semantics'].match(s, position)
if match is not None:
- return ParserSubstitute(match.group(1)).next(), match.end()
+ return next(ParserSubstitute(match.group(1))), match.end()
# String value
if s[position] in "'\"":
@@ -457,11 +457,11 @@
try:
lhs, position = cls.inner_parse(s, position)
lhs = cls(lhs)
- except ValueError, e:
+ except ValueError as e:
estr = ('Error parsing field structure\n\n\t' +
s + '\n\t' + ' '*e.args[1] + '^ ' +
'Expected %s\n' % e.args[0])
- raise ValueError, estr
+ raise ValueError(estr)
lhs.freeze()
match = _PARSE_RE['arrow'].match(s, position)
@@ -475,11 +475,11 @@
try:
val, position = cls.inner_parse(s, position, {})
if isinstance(val, dict): val = cls(val)
- except ValueError, e:
+ except ValueError as e:
estr = ('Error parsing field structure\n\n\t' +
s + '\n\t' + ' '*e.args[1] + '^ ' +
'Expected %s\n' % e.args[0])
- raise ValueError, estr
+ raise ValueError(estr)
if isinstance(val, Category): val.freeze()
rhs.append(val)
position = _PARSE_RE['whitespace'].match(s, position).end()
@@ -521,7 +521,7 @@
def _str(cls, obj, reentrances, reentrance_ids):
segments = []
- keys = obj.keys()
+ keys = list(obj.keys())
keys.sort()
for fname in keys:
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/category.py
RefactoringTool: Warnings/messages while refactoring:
RefactoringTool: ### In file ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/category.py ###
RefactoringTool: Line 129: could not convert: raise "Cannot modify a frozen Category"
RefactoringTool: Python 3 does not support string exceptions
if fname == cls.headname: continue
@@ -576,9 +576,9 @@
if slash_match is not None:
position = slash_match.end()
slash, position = GrammarCategory._parseval(s, position, reentrances)
- if isinstance(slash, basestring): slash = {'pos': slash}
+ if isinstance(slash, str): slash = {'pos': slash}
body['/'] = unify(body.get('/'), slash)
- elif not body.has_key('/'):
+ elif '/' not in body:
body['/'] = False
return cls(body), position
@@ -652,7 +652,7 @@
return lookup
def earley_parser(self, trace=1):
- from featurechart import FeatureEarleyChartParse
+ from .featurechart import FeatureEarleyChartParse
if self.kimmo is None: lexicon = self.earley_lexicon()
else: lexicon = self.kimmo_lexicon()
@@ -706,28 +706,28 @@
yaml.add_representer(GrammarCategory, GrammarCategory.to_yaml)
def demo():
- print "Category(pos='n', agr=dict(number='pl', gender='f')):"
- print
- print Category(pos='n', agr=dict(number='pl', gender='f'))
- print repr(Category(pos='n', agr=dict(number='pl', gender='f')))
- print
- print "GrammarCategory.parse('NP/NP'):"
- print
- print GrammarCategory.parse('NP/NP')
- print repr(GrammarCategory.parse('NP/NP'))
- print
- print "GrammarCategory.parse('?x/?x'):"
- print
- print GrammarCategory.parse('?x/?x')
- print repr(GrammarCategory.parse('?x/?x'))
- print
- print "GrammarCategory.parse('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]'):"
- print
- print GrammarCategory.parse('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]')
- print repr(GrammarCategory.parse('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]'))
- print
+ print("Category(pos='n', agr=dict(number='pl', gender='f')):")
+ print()
+ print(Category(pos='n', agr=dict(number='pl', gender='f')))
+ print(repr(Category(pos='n', agr=dict(number='pl', gender='f'))))
+ print()
+ print("GrammarCategory.parse('NP/NP'):")
+ print()
+ print(GrammarCategory.parse('NP/NP'))
+ print(repr(GrammarCategory.parse('NP/NP')))
+ print()
+ print("GrammarCategory.parse('?x/?x'):")
+ print()
+ print(GrammarCategory.parse('?x/?x'))
+ print(repr(GrammarCategory.parse('?x/?x')))
+ print()
+ print("GrammarCategory.parse('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]'):")
+ print()
+ print(GrammarCategory.parse('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]'))
+ print(repr(GrammarCategory.parse('VP[+fin, agr=?x, tense=past]/NP[+pl, agr=?x]')))
+ print()
g = GrammarFile.read_file("speer.cfg")
- print g.grammar()
+ print(g.grammar())
if __name__ == '__main__':
demo()
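The warning printed for category.py marks the one change 2to3 refuses to make: raise "Cannot modify a frozen Category" is a Python 2 string exception, removed outright in Python 3, so it has to be patched by hand. One plausible manual fix (the exception class name is invented):

    class FrozenCategoryError(TypeError):
        """Stand-in for the old string exception."""

    def ensure_mutable(frozen):
        if frozen:
            # was: raise "Cannot modify a frozen Category"
            raise FrozenCategoryError("Cannot modify a frozen Category")

    try:
        ensure_mutable(True)
    except FrozenCategoryError as e:
        print(e)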
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/__init__.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/__init__.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/__init__.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/parse/__init__.py (refactored)
@@ -131,7 +131,7 @@
"""
# Make sure we're not directly instantiated:
if self.__class__ == AbstractParse:
- raise AssertionError, "Abstract classes can't be instantiated"
+ raise AssertionError("Abstract classes can't be instantiated")
def parse(self, sentence):
return self.get_parse_list(sentence.split())
@@ -155,11 +155,11 @@
line = line.strip()
if not line: continue
if line.startswith('#'):
- print line
+ print(line)
continue
- print "Sentence:", line
+ print("Sentence:", line)
parses = self.parse(line)
- print "%d parses." % len(parses)
- for tree in parses: print tree
+ print("%d parses." % len(parses))
+ for tree in parses: print(tree)
from nltk.parse import *
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/rules.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/rules.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/rules.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/rules.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/rules.py (refactored)
@@ -1,7 +1,7 @@
from nltk.parse import Tree
-from fsa import FSA
+from .fsa import FSA
from nltk import tokenize
-from pairs import KimmoPair, sort_subsets
+from .pairs import KimmoPair, sort_subsets
from copy import deepcopy
import re, yaml
@@ -65,13 +65,11 @@
def parse_table(name, table, subsets):
lines = table.split('\n')
if len(lines) < 4:
- raise ValueError,\
- "Rule %s has too few lines to be an FSA table." % name
+ raise ValueError("Rule %s has too few lines to be an FSA table." % name)
pairs1 = lines[1].strip().split()
pairs2 = lines[2].strip().split()
if len(pairs1) != len(pairs2):
- raise ValueError,\
- "Rule %s has pair definitions that don't line up." % name
+ raise ValueError("Rule %s has pair definitions that don't line up." % name)
pairs = [KimmoPair(p1, p2) for p1, p2 in zip(pairs1, pairs2)]
finals = []
fsa = FSA()
@@ -80,18 +78,16 @@
if not line: continue
groups = re.match(r'(\w+)(\.|:)\s*(.*)', line)
if groups is None:
- raise ValueError,\
- "Can't parse this line of the state table for rule %s:\n%s"\
- % (name, line)
+ raise ValueError("Can't parse this line of the state table for rule %s:\n%s"\
+ % (name, line))
state, char, morestates = groups.groups()
if fsa.start() == 0: fsa.set_start(state)
if char == ':': finals.append(state)
fsa.add_state(state)
morestates = morestates.split()
if len(morestates) != len(pairs):
- raise ValueError,\
- "Rule %s has a row of the wrong length:\n%s\ngot %d items, should be %d"\
- % (name, line, len(morestates), len(pairs))
+ raise ValueError("Rule %s has a row of the wrong length:\n%s\ngot %d items, should be %d"\
+ % (name, line, len(morestates), len(pairs)))
for pair, nextstate in zip(pairs, morestates):
fsa.insert_safe(state, pair, nextstate)
fsa.set_final(finals)
@@ -101,11 +97,11 @@
def from_dfa_dict(name, states, subsets):
fsa = FSA()
pairs = set([KimmoPair.make('@')])
- for (statename, trans) in states.items():
+ for (statename, trans) in list(states.items()):
for label in trans:
if label != 'others':
pairs.add(KimmoPair.make(label))
- for (statename, trans) in states.items():
+ for (statename, trans) in list(states.items()):
parts = statename.split()
source = parts[-1]
if not parts[0].startswith('rej'):
@@ -120,7 +116,7 @@
for label in trans:
if label != 'others':
used_pairs.add(KimmoPair.make(label))
- for label, target in trans.items():
+ for label, target in list(trans.items()):
if label.lower() == 'others':
fsa.insert_safe(source, KimmoPair.make('@'), target)
for pair in pairs.difference(used_pairs):
@@ -366,11 +362,11 @@
def demo():
rule = KimmoArrowRule("elision-e", "e:0 <== CN u _ +:@ VO", {'@':
'aeiouhklmnpw', 'VO': 'aeiou', 'CN': 'hklmnpw'})
- print rule
- print rule._left_fsa
- print rule._right_fsa
- print
- print rule._fsa
+ print(rule)
+ print(rule._left_fsa)
+ print(rule._right_fsa)
+ print()
+ print(rule._fsa)
if __name__ == '__main__':
demo()
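The list(...) wrappers that the dict fixer adds around states.items() above are not cosmetic: in Python 3, items()/keys()/values() are live views, and mutating the dict while iterating one raises RuntimeError unless the view is snapshotted first. A minimal sketch:

    transitions = {'s1': ['s2'], 'rej 1': [], 's2': ['s1']}
    for state, targets in list(transitions.items()):  # snapshot before mutating
        if state.startswith('rej'):
            del transitions[state]
    print(sorted(transitions))  # ['s1', 's2']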
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/pairs.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/pairs.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/pairs.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/pairs.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/pairs.py (refactored)
@@ -66,5 +66,5 @@
parts = text.split(':')
if len(parts) == 1: return KimmoPair(text, text)
elif len(parts) == 2: return KimmoPair(parts[0], parts[1])
- else: raise ValueError, "Bad format for pair: %s" % text
+ else: raise ValueError("Bad format for pair: %s" % text)
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/morphology.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/morphology.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/morphology.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/morphology.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/morphology.py (refactored)
@@ -1,6 +1,6 @@
-from fsa import FSA
+from .fsa import FSA
import yaml
-from featurelite import unify
+from .featurelite import unify
def startswith(stra, strb):
return stra[:len(strb)] == strb
@@ -44,14 +44,14 @@
def fsa(self): return self._fsa
def valid_lexical(self, state, word, alphabet):
trans = self.fsa()._transitions[state]
- for label in trans.keys():
+ for label in list(trans.keys()):
if label is not None and startswith(label[0], word) and len(label[0]) > len(word):
next = label[0][len(word):]
for pair in alphabet:
if startswith(next, pair.input()): yield pair.input()
def next_states(self, state, word):
choices = self.fsa()._transitions[state]
- for (key, value) in choices.items():
+ for (key, value) in list(choices.items()):
if key is None:
if word == '':
for next in value: yield (next, None)
@@ -102,11 +102,11 @@
word = ''
fsa.insert_safe(state, (word, features), next)
else:
- print "Ignoring line in morphology: %r" % line
+ print("Ignoring line in morphology: %r" % line)
return KimmoMorphology(fsa)
def demo():
- print KimmoMorphology.load('english.lex')
+ print(KimmoMorphology.load('english.lex'))
if __name__ == '__main__':
demo()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/kimmotest.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/kimmotest.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/kimmotest.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/kimmotest.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/kimmotest.py (refactored)
@@ -1,4 +1,4 @@
-from kimmo import *
+from .kimmo import *
k = KimmoRuleSet.load('english.yaml')
-print list(k.generate('`slip+ed', TextTrace(3)))
-print list(k.recognize('slipped', TextTrace(1)))
+print(list(k.generate('`slip+ed', TextTrace(3))))
+print(list(k.recognize('slipped', TextTrace(1))))
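With the leading dot added, from .kimmo import * only resolves when kimmotest.py is imported as part of its package, so running it as a bare script stops working. One way to run it afterwards (the module path here is inferred from the file paths in this log, not verified):

    import runpy

    # roughly: python3 -m nltk_contrib.mit.six863.kimmo.kimmotest
    runpy.run_module('nltk_contrib.mit.six863.kimmo.kimmotest',
                     run_name='__main__')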
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/kimmo.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/kimmo.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/kimmo.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/kimmo.py (refactored)
@@ -2,15 +2,15 @@
# by Rob Speer (rspeer@mit.edu)
# based on code from Carl de Marcken, Beracah Yankama, and Rob Speer
-from rules import KimmoArrowRule, KimmoFSARule
-from pairs import KimmoPair, sort_subsets
-from morphology import *
-from fsa import FSA
+from .rules import KimmoArrowRule, KimmoFSARule
+from .pairs import KimmoPair, sort_subsets
+from .morphology import *
+from .fsa import FSA
import yaml
def _pairify(state):
newstate = {}
- for label, targets in state.items():
+ for label, targets in list(state.items()):
newstate[KimmoPair.make(label)] = targets
return newstate
@@ -191,7 +191,7 @@
def _advance_rule(self, rule, state, pair):
trans = rule.fsa()._transitions[state]
- expected_pairs = sort_subsets(trans.keys(), self._subsets)
+ expected_pairs = sort_subsets(list(trans.keys()), self._subsets)
for comppair in expected_pairs:
if comppair.includes(pair, self._subsets):
return rule.fsa().nextState(state, comppair)
@@ -200,16 +200,16 @@
def _test_case(self, input, outputs, arrow, method):
outputs.sort()
if arrow == '<=':
- print '%s %s %s' % (', '.join(outputs), arrow, input)
+ print('%s %s %s' % (', '.join(outputs), arrow, input))
else:
- print '%s %s %s' % (input, arrow, ', '.join(outputs))
+ print('%s %s %s' % (input, arrow, ', '.join(outputs)))
value = method(input)
if len(value) and isinstance(value[0], tuple):
results = [v[0] for v in value]
else: results = value
results.sort()
if outputs != results:
- print ' Failed: got %s' % (', '.join(results) or 'no results')
+ print(' Failed: got %s' % (', '.join(results) or 'no results'))
return False
else: return True
@@ -244,7 +244,7 @@
arrow = arrow_to_try
break
if arrow is None:
- raise ValueError, "Can't find arrow in line: %s" % line
+ raise ValueError("Can't find arrow in line: %s" % line)
lexicals = lexicals.strip().split(', ')
surfaces = surfaces.strip().split(', ')
if lexicals == ['']: lexicals = []
@@ -348,28 +348,28 @@
if lexicon:
lexicon = KimmoMorphology.load(lexicon)
subsets = map['subsets']
- for key, value in subsets.items():
- if isinstance(value, basestring):
+ for key, value in list(subsets.items()):
+ if isinstance(value, str):
subsets[key] = value.split()
defaults = map['defaults']
- if isinstance(defaults, basestring):
+ if isinstance(defaults, str):
defaults = defaults.split()
defaults = [KimmoPair.make(text) for text in defaults]
ruledic = map['rules']
rules = []
- for (name, rule) in ruledic.items():
+ for (name, rule) in list(ruledic.items()):
if isinstance(rule, dict):
rules.append(KimmoFSARule.from_dfa_dict(name, rule, subsets))
- elif isinstance(rule, basestring):
+ elif isinstance(rule, str):
if rule.strip().startswith('FSA'):
rules.append(KimmoFSARule.parse_table(name, rule, subsets))
else: rules.append(KimmoArrowRule(name, rule, subsets))
else:
- raise ValueError, "Can't recognize the data structure in '%s' as a rule: %s" % (name, rule)
+ raise ValueError("Can't recognize the data structure in '%s' as a rule: %s" % (name, rule))
return cls(subsets, defaults, rules, lexicon)
def gui(self, startTk=True):
- import draw
+ from . import draw
return draw.KimmoGUI(self, startTk)
draw_graphs = gui
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/kimmo.py
@@ -392,50 +392,50 @@
surface = ''.join(p.output() for p in pairs)
indent = ' '*len(lexical)
if self.verbosity > 2:
- print '%s%s<%s>' % (indent, lexical, curr.input())
- print '%s%s<%s>' % (indent, surface, curr.output())
+ print('%s%s<%s>' % (indent, lexical, curr.input()))
+ print('%s%s<%s>' % (indent, surface, curr.output()))
for rule, state1, state2 in zip(rules, prev_states, states):
- print '%s%s: %s => %s' % (indent, rule.name(), state1, state2)
+ print('%s%s: %s => %s' % (indent, rule.name(), state1, state2))
if morphology_state:
- print '%sMorphology: %r => %s' % (indent, word, morphology_state)
- print
+ print('%sMorphology: %r => %s' % (indent, word, morphology_state))
+ print()
elif self.verbosity > 1:
- print '%s%s<%s>' % (indent, lexical, curr.input())
- print '%s%s<%s>' % (indent, surface, curr.output())
- z = zip(prev_states, states)
+ print('%s%s<%s>' % (indent, lexical, curr.input()))
+ print('%s%s<%s>' % (indent, surface, curr.output()))
+ z = list(zip(prev_states, states))
if morphology_state:
z.append((word, morphology_state))
- print indent + (" ".join('%s>%s' % (old, new) for old, new in z))
+ print(indent + (" ".join('%s>%s' % (old, new) for old, new in z)))
blocked = []
for rule, state in zip(rules, states):
if str(state).lower() in ['0', 'reject']:
blocked.append(rule.name())
if blocked:
- print '%s[blocked by %s]' % (indent, ", ".join(blocked))
- print
+ print('%s[blocked by %s]' % (indent, ", ".join(blocked)))
+ print()
else:
- print '%s%s<%s> | %s<%s>' % (indent, lexical, curr.input(),
- surface, curr.output()),
+ print('%s%s<%s> | %s<%s>' % (indent, lexical, curr.input(),
+ surface, curr.output()), end=' ')
if morphology_state:
- print '\t%r => %s' % (word, morphology_state),
+ print('\t%r => %s' % (word, morphology_state), end=' ')
blocked = []
for rule, state in zip(rules, states):
if str(state).lower() in ['0', 'reject']:
blocked.append(rule.name())
if blocked:
- print ' [blocked by %s]' % (", ".join(blocked)),
- print
+ print(' [blocked by %s]' % (", ".join(blocked)), end=' ')
+ print()
def succeed(self, pairs):
lexical = ''.join(p.input() for p in pairs)
surface = ''.join(p.output() for p in pairs)
indent = ' '*len(lexical)
- print '%s%s' % (indent, lexical)
- print '%s%s' % (indent, surface)
- print '%sSUCCESS: %s <=> %s' % (indent, lexical, surface)
- print
- print
+ print('%s%s' % (indent, lexical))
+ print('%s%s' % (indent, surface))
+ print('%sSUCCESS: %s <=> %s' % (indent, lexical, surface))
+ print()
+ print()
def load(filename):
"""
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/fsa.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/fsa.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/fsa.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/fsa.py (refactored)
@@ -63,8 +63,8 @@
A generator that yields each transition arrow in the FSA in the form
(source, label, target).
"""
- for (state, map) in self._transitions.items():
- for (symbol, targets) in map.items():
+ for (state, map) in list(self._transitions.items()):
+ for (symbol, targets) in list(map.items()):
for target in targets:
yield (state, symbol, target)
@@ -73,7 +73,7 @@
A generator for all possible labels taking state s1 to state s2.
"""
map = self._transitions.get(s1, {})
- for (symbol, targets) in map.items():
+ for (symbol, targets) in list(map.items()):
if s2 in targets: yield symbol
def sigma(self):
@@ -134,7 +134,7 @@
@returns: a list of all states in the FSA.
@rtype: list
"""
- return self._transitions.keys()
+ return list(self._transitions.keys())
def add_final(self, state):
"""
@@ -184,11 +184,11 @@
@param s2: the destination of the transition
"""
if s1 not in self.states():
- raise ValueError, "State %s does not exist in %s" % (s1,
- self.states())
+ raise ValueError("State %s does not exist in %s" % (s1,
+ self.states()))
if s2 not in self.states():
- raise ValueError, "State %s does not exist in %s" % (s2,
- self.states())
+ raise ValueError("State %s does not exist in %s" % (s2,
+ self.states()))
self._add_transition(self._transitions, s1, label, s2)
self._add_transition(self._reverse, s2, label, s1)
@@ -212,16 +212,16 @@
@param s2: the destination of the transition
"""
if s1 not in self.states():
- raise ValueError, "State %s does not exist" % s1
+ raise ValueError("State %s does not exist" % s1)
if s2 not in self.states():
- raise ValueError, "State %s does not exist" % s1
+ raise ValueError("State %s does not exist" % s1)
self._del_transition(self._transitions, s1, label, s2)
self._del_transition(self._reverse, s2, label, s1)
def delete_state(self, state):
"Removes a state and all its transitions from the FSA."
if state not in self.states():
- raise ValueError, "State %s does not exist" % state
+ raise ValueError("State %s does not exist" % state)
for (s1, label, s2) in self.incident_transitions(state):
self.delete(s1, label, s2)
del self._transitions[state]
@@ -235,10 +235,10 @@
result = set()
forward = self._transitions[state]
backward = self._reverse[state]
- for label, targets in forward.items():
+ for label, targets in list(forward.items()):
for target in targets:
result.add((state, label, target))
- for label, targets in backward.items():
+ for label, targets in list(backward.items()):
for target in targets:
result.add((target, label, state))
return result
@@ -248,9 +248,9 @@
Assigns a state a new identifier.
"""
if old not in self.states():
- raise ValueError, "State %s does not exist" % old
+ raise ValueError("State %s does not exist" % old)
if new in self.states():
- raise ValueError, "State %s already exists" % new
+ raise ValueError("State %s already exists" % new)
changes = []
for (s1, symbol, s2) in self.generate_transitions():
if s1 == old and s2 == old:
@@ -261,7 +261,7 @@
changes.append((s1, symbol, s2, s1, symbol, new))
for (leftstate, symbol, rightstate, newleft, newsym, newright)\
in changes:
- print leftstate, symbol, rightstate, newleft, newsym, newright
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/fsa.py
+ print(leftstate, symbol, rightstate, newleft, newsym, newright)
self.delete(leftstate, symbol, rightstate)
self.insert_safe(newleft, newsym, newright)
del self._transitions[old]
@@ -284,8 +284,8 @@
Return whether this is a DFA
(every symbol leads from a state to at most one target state).
"""
- for map in self._transitions.values():
- for targets in map.values():
+ for map in list(self._transitions.values()):
+ for targets in list(map.values()):
if len(targets) > 1: return False
return True
@@ -297,14 +297,14 @@
"""
next = self.next(state, symbol)
if len(next) > 1:
- raise ValueError, "This FSA is nondeterministic -- use nextStates instead."
+ raise ValueError("This FSA is nondeterministic -- use nextStates instead.")
elif len(next) == 1: return list(next)[0]
else: return None
def forward_traverse(self, state):
"All states reachable by following transitions from a given state."
result = set()
- for (symbol, targets) in self._transitions[state].items():
+ for (symbol, targets) in list(self._transitions[state].items()):
result = result.union(targets)
return result
@@ -312,7 +312,7 @@
"""All states from which a given state is reachable by following
transitions."""
result = set()
- for (symbol, targets) in self._reverse[state].items():
+ for (symbol, targets) in list(self._reverse[state].items()):
result = result.union(targets)
return result
@@ -344,7 +344,7 @@
self._clean_map(self._reverse[state])
def _clean_map(self, map):
- for (key, value) in map.items():
+ for (key, value) in list(map.items()):
if len(value) == 0:
del map[key]
@@ -406,7 +406,7 @@
for label in self.sigma():
nfa_next = tuple(self.e_closure(self.move(map[dfa_state],
label)))
- if map.has_key(nfa_next):
+ if nfa_next in map:
dfa_next = map[nfa_next]
else:
dfa_next = dfa.new_state()
@@ -422,7 +422,7 @@
"Generate all accepting sequences of length at most maxlen."
if maxlen > 0:
if state in self._finals:
- print prefix
+ print(prefix)
for (s1, labels, s2) in self.outgoing_transitions(state):
for label in labels():
self.generate(maxlen-1, s2, prefix+label)
@@ -431,14 +431,14 @@
"""
Print a representation of this FSA (in human-readable YAML format).
"""
- print yaml.dump(self)
+ print(yaml.dump(self))
@classmethod
def from_yaml(cls, loader, node):
map = loader.construct_mapping(node)
result = cls(map.get('sigma', []), {}, map.get('finals', []))
- for (s1, map1) in map['transitions'].items():
- for (symbol, targets) in map1.items():
+ for (s1, map1) in list(map['transitions'].items()):
+ for (symbol, targets) in list(map1.items()):
for s2 in targets:
result.insert(s1, symbol, s2)
return result
@@ -590,19 +590,19 @@
# Use a regular expression to initialize the FSA.
re = 'abcd'
- print 'Regular Expression:', re
+ print('Regular Expression:', re)
re2nfa(fsa, re)
- print "NFA:"
+ print("NFA:")
fsa.pp()
# Convert the (nondeterministic) FSA to a deterministic FSA.
dfa = fsa.dfa()
- print "DFA:"
+ print("DFA:")
dfa.pp()
# Prune the DFA
dfa.prune()
- print "PRUNED DFA:"
+ print("PRUNED DFA:")
dfa.pp()
# Use the FSA to generate all strings of length less than 3
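Likewise, return list(self._transitions.keys()) in states() above matters because callers sort and index the result, and a Python 3 keys view supports neither. A minimal sketch:

    transitions = {'s2': {}, 's1': {}, 's3': {}}
    states = list(transitions.keys())  # a view has no .sort() and no indexing
    states.sort()
    print(states[0], states[-1])  # s1 s3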
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/featurelite.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/featurelite.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/featurelite.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/featurelite.py (refactored)
@@ -91,7 +91,7 @@
instantiated.
"""
def __init__(self):
- raise TypeError, "The _FORWARD class is not meant to be instantiated"
+ raise TypeError("The _FORWARD class is not meant to be instantiated")
class Variable(object):
"""
@@ -237,7 +237,7 @@
def variable_representer(dumper, var):
"Output variables in YAML as ?name."
- return dumper.represent_scalar(u'!var', u'?%s' % var.name())
+ return dumper.represent_scalar('!var', '?%s' % var.name())
yaml.add_representer(Variable, variable_representer)
def variable_constructor(loader, node):
@@ -245,8 +245,8 @@
value = loader.construct_scalar(node)
name = value[1:]
return Variable(name)
-yaml.add_constructor(u'!var', variable_constructor)
-yaml.add_implicit_resolver(u'!var', re.compile(r'^\?\w+$'))
+yaml.add_constructor('!var', variable_constructor)
+yaml.add_implicit_resolver('!var', re.compile(r'^\?\w+$'))
def _copy_and_bind(feature, bindings, memo=None):
"""
@@ -258,14 +258,14 @@
if memo is None: memo = {}
if id(feature) in memo: return memo[id(feature)]
if isinstance(feature, Variable) and bindings is not None:
- if not bindings.has_key(feature.name()):
+ if feature.name() not in bindings:
bindings[feature.name()] = feature.copy()
result = _copy_and_bind(bindings[feature.name()], None, memo)
else:
if isMapping(feature):
# Construct a new object of the same class
result = feature.__class__()
- for (key, value) in feature.items():
+ for (key, value) in list(feature.items()):
result[key] = _copy_and_bind(value, bindings, memo)
else: result = feature
memo[id(feature)] = result
@@ -576,7 +576,7 @@
copy2 = _copy_and_bind(feature2, bindings2, copymemo)
# Preserve links between bound variables and the two feature structures.
for b in (bindings1, bindings2):
- for (vname, value) in b.items():
+ for (vname, value) in list(b.items()):
value_id = id(value)
if value_id in copymemo:
b[vname] = copymemo[value_id]
@@ -602,7 +602,7 @@
UnificationFailure is raised, and the values of C{self}
and C{other} are undefined.
"""
- if memo.has_key((id(feature1), id(feature2))):
+ if (id(feature1), id(feature2)) in memo:
return memo[id(feature1), id(feature2)]
unified = _do_unify(feature1, feature2, bindings1, bindings2, memo, fail)
memo[id(feature1), id(feature2)] = unified
@@ -643,9 +643,9 @@
# At this point, we know they're both mappings.
# Do the destructive part of unification.
- while feature2.has_key(_FORWARD): feature2 = feature2[_FORWARD]
+ while _FORWARD in feature2: feature2 = feature2[_FORWARD]
feature2[_FORWARD] = feature1
- for (fname, val2) in feature2.items():
+ for (fname, val2) in list(feature2.items()):
if fname == _FORWARD: continue
val1 = feature1.get(fname)
feature1[fname] = _destructively_unify(val1, val2, bindings1,
@@ -658,12 +658,12 @@
the target of its forward pointer (to preserve reentrance).
"""
if not isMapping(feature): return
- if visited.has_key(id(feature)): return
+ if id(feature) in visited: return
visited[id(feature)] = True
- for fname, fval in feature.items():
+ for fname, fval in list(feature.items()):
if isMapping(fval):
- while fval.has_key(_FORWARD):
+ while _FORWARD in fval:
fval = fval[_FORWARD]
feature[fname] = fval
_apply_forwards(fval, visited)
@@ -695,10 +695,10 @@
else:
return var.forwarded_self()
if not isMapping(mapping): return mapping
- if visited.has_key(id(mapping)): return mapping
+ if id(mapping) in visited: return mapping
visited[id(mapping)] = True
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/featurelite.py
- for fname, fval in mapping.items():
+ for fname, fval in list(mapping.items()):
if isMapping(fval):
_lookup_values(fval, visited)
elif isinstance(fval, Variable):
@@ -719,9 +719,9 @@
Replace any feature structures that have been forwarded by their new
identities.
"""
- for (key, value) in bindings.items():
- if isMapping(value) and value.has_key(_FORWARD):
- while value.has_key(_FORWARD):
+ for (key, value) in list(bindings.items()):
+ if isMapping(value) and _FORWARD in value:
+ while _FORWARD in value:
value = value[_FORWARD]
bindings[key] = value
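The featurelite.py hunks are dominated by the has_key fixer: dict.has_key() no longer exists in Python 3, so every such test becomes an in / not in expression. A minimal sketch:

    bindings = {'x': 'sg'}
    if 'x' in bindings:           # was: bindings.has_key('x')
        print(bindings['x'])
    if 'y' not in bindings:       # was: not bindings.has_key('y')
        bindings['y'] = 'pl'
    print(bindings)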
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/draw.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/draw.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/draw.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/draw.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/draw.py (refactored)
@@ -1,10 +1,10 @@
-import Tkinter as tk
-from morphology import KimmoMorphology
-from fsa import FSA
+import tkinter as tk
+from .morphology import KimmoMorphology
+from .fsa import FSA
class KimmoGUI(object):
def __init__(self, ruleset, startTk=False):
- import Tkinter as tk
+ import tkinter as tk
if startTk: self._root = tk.Tk()
else: self._root = tk.Toplevel()
@@ -131,7 +131,7 @@
def highlight_states(self, states, morph):
select = self.listbox.curselection() or 0
self.listbox.delete(0, tk.END)
- for (index, stored) in self.widget_store.items():
+ for (index, stored) in list(self.widget_store.items()):
graph, widget = stored
if index == -1: state = morph
else: state = states[index]
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/__init__.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/__init__.py
--- ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/__init__.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/mit/six863/kimmo/__init__.py (refactored)
@@ -1 +1 @@
-from kimmo import *
+from .kimmo import *
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/six863/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No files need to be modified.
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/mit/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No files need to be modified.
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/misc/paradigmquery.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/misc/paradigmquery.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/misc/paradigmquery.py
--- ../python3/nltk_contrib/nltk_contrib/misc/paradigmquery.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/misc/paradigmquery.py (refactored)
@@ -47,7 +47,7 @@
self.xml = None
# If p_string was given, parse it
- if p_string <> None:
+ if p_string != None:
self.parse(p_string)
def parse(self, p_string):
@@ -124,7 +124,7 @@
try:
self.parseList = rd_parser.get_parse_list(toklist)[0]
except IndexError:
- print "Could not parse query."
+ print("Could not parse query.")
return
# Set the nltk.parse.tree tree for this query to the global sentence
@@ -142,13 +142,13 @@
Returns the results from the CFG parsing
"""
if self.string == None:
- print "No string has been parsed. Please use parse(string)."
+ print("No string has been parsed. Please use parse(string).")
return None
return self.nltktree
def getXML(self):
if self.string == None:
- print "No string has been parsed. Please use parse(string)."
+ print("No string has been parsed. Please use parse(string).")
return None
return '\n' + self.xml \
+ ""
@@ -279,16 +279,16 @@
query = r'table(one/two/three, four, five)'
# Print the query
- print """
+ print("""
================================================================================
Query: ParadigmQuery(query)
================================================================================
-"""
+""")
a = ParadigmQuery(query)
- print query
+ print(query)
# Print the Tree representation
- print """
+ print("""
================================================================================
Tree: getTree()
O is an operator
@@ -296,19 +296,19 @@
H is a hierarchy
D is a domain
================================================================================
-"""
- print a.getTree()
+""")
+ print(a.getTree())
# Print the XML representation
- print """
+ print("""
================================================================================
XML: getXML()
================================================================================
-"""
- print a.getXML()
+""")
+ print(a.getXML())
# Some space
- print
+ print()
if __name__ == '__main__':
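paradigmquery.py also used the Python 2-only inequality operator <>, which the fixer rewrites to != (the p_string != None lines above). Idiomatic Python 3 would go one step further and compare to None by identity:

    p_string = 'table(one/two/three, four, five)'
    if p_string is not None:  # preferred over: p_string != None
        print(p_string)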
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/misc/paradigm.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/misc/paradigm.py
--- ../python3/nltk_contrib/nltk_contrib/misc/paradigm.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/misc/paradigm.py (refactored)
@@ -22,7 +22,7 @@
# a.setOutput('term') # output is sent to terminal
from xml.dom.ext.reader import Sax2
-from paradigmquery import ParadigmQuery
+from .paradigmquery import ParadigmQuery
import re, os
class Paradigm(object):
@@ -73,9 +73,9 @@
s = ""
while s != "exit":
s = "exit"
- try: s = raw_input(">")
+ try: s = input(">")
except EOFError:
- print s
+ print(s)
if s == "exit":
return
if s == "quit":
@@ -93,7 +93,7 @@
# parse the query
parse = ParadigmQuery(p_string)
except:
- print "Could not parse query."
+ print("Could not parse query.")
return
try:
@@ -103,7 +103,7 @@
if result == None:
raise Error
except:
- print "Sorry, no result can be returned"
+ print("Sorry, no result can be returned")
return
try:
@@ -111,7 +111,7 @@
if self.format == "html":
output = '\n'
# Include CSS if we need to
- if self.css <> None:
+ if self.css != None:
output += '\n'
@@ -124,14 +124,14 @@
output = result.getText()
except:
output = None
- print "--no output--"
+ print("--no output--")
return
# Print to terminal if output is set, otherwise to file
if self.output == "term":
- print output
+ print(output)
else:
- print "Output written to file:", self.output
+ print("Output written to file:", self.output)
f = open(self.output, 'w')
f.write(output)
@@ -151,9 +151,9 @@
elif p_string == "text":
self.format = "text"
else:
- print "Unknown format:", p_string
- print "Valid formats are: text, html"
- print "Setting format = text"
+ print("Unknown format:", p_string)
+ print("Valid formats are: text, html")
+ print("Setting format = text")
self.format = "text"
def setCSS(self, p_string=None):
@@ -161,8 +161,8 @@
Set the file location for a Cascading Stylesheet: None or filename
This allows for simple formatting
"""
- if p_string <> None:
- print "Using CSS file:", p_string
+ if p_string != None:
+ print("Using CSS file:", p_string)
self.output = p_string
def setOutput(self, p_string=None):
@@ -174,9 +174,9 @@
p_string = "term"
# set to term if requested, otherwise filename
if p_string == "term":
- print "Directing output to terminal"
+ print("Directing output to terminal")
else:
- print "Directing output to file:", p_string
+ print("Directing output to file:", p_string)
self.output = p_string
@@ -201,7 +201,7 @@
f = open(try_filename)
p_filename = try_filename
except IOError:
- print "Cannot find file"
+ print("Cannot find file")
return None
f.close()
@@ -241,14 +241,14 @@
self.data.append(tmp_dict)
# Talk to the user
- print "Paradigm information successfully loaded from file:", p_filename
+ print("Paradigm information successfully loaded from file:", p_filename)
# State the number and print out a list of attributes
- print " "*4 + str(len(self.attributes)) + " attributes imported:",
+ print(" "*4 + str(len(self.attributes)) + " attributes imported:", end=' ')
for att in self.attributes:
- print att,
- print
+ print(att, end=' ')
+ print()
# State the number of paradigm objects imported
- print " "*4 + str(len(self.data)) + " paradigm objects imported."
+ print(" "*4 + str(len(self.data)) + " paradigm objects imported.")
return
@@ -360,7 +360,7 @@
self.paradigm.attributes[self.attribute]
except KeyError:
self.error = "I couldn't find this attribute: " + self.attribute
- print self.error
+ print(self.error)
def __getitem__(self, p_index):
return self.paradigm.attributes[self.attribute][p_index]
@@ -616,10 +616,10 @@
vertical_header_rows = vertical_header.split('')
cell_rows = str_cells.replace('','').split('
')
# Join two lists
- zipped = zip(vertical_header_rows, cell_rows)
+ zipped = list(zip(vertical_header_rows, cell_rows))
str_zipped = ""
for (header,cells) in zipped:
- if header <> '':
+ if header != '':
str_zipped += header + cells + "\n"
# Return all the elements
@@ -629,22 +629,22 @@
"""
Return a horizontal html table (?)
"""
- print "?: getHorizontalHTML() called on a table."
+ print("?: getHorizontalHTML() called on a table.")
return None
def getText(self):
"""
Return text for this table (?)
"""
- print "?: getText() for a table? HAHAHAHAHA"
- print "call setFormat('html') if you want to run queries like that"
+ print("?: getText() for a table? HAHAHAHAHA")
+ print("call setFormat('html') if you want to run queries like that")
return
def getConditions(self):
"""
Return conditions for this table (?)
"""
- print "?: getConditions() called on a table. I don't think so."
+ print("?: getConditions() called on a table. I don't think so.")
return None
def getMaxWidth(self):
@@ -658,7 +658,7 @@
"""
Return span for this table (?)
"""
- print "WTF: getSpan() called on a table."
+ print("WTF: getSpan() called on a table.")
return None
def getData(self, p_return, p_attDict):
@@ -676,7 +676,7 @@
for datum in self.paradigm.data:
inc = True
# For each given attribute requirement
- for att in p_attDict.keys():
+ for att in list(p_attDict.keys()):
# If the data object fails the requirement do not include
if datum[att] != p_attDict[att]:
inc = False
@@ -704,74 +704,74 @@
If there is any key overlap, dict1 wins!
(just make sure this doesn't happen)
"""
- for key in dict1.keys():
+ for key in list(dict1.keys()):
dict2[key] = dict1[key]
return dict2
def demo():
# Print the query
- print """
+ print("""
================================================================================
Load: Paradigm(file)
================================================================================
-"""
- print
- print ">>> a = Paradigm('german.xml')"
- print
+""")
+ print()
+ print(">>> a = Paradigm('german.xml')")
+ print()
a = Paradigm('german.xml')
- print
- print ">>> a.setOutput('term')"
- print
+ print()
+ print(">>> a.setOutput('term')")
+ print()
a.setOutput('term')
- print
- print ">>> a.setFormat('text')"
- print
+ print()
+ print(">>> a.setFormat('text')")
+ print()
a.setFormat('text')
# Print a domain
- print """
+ print("""
================================================================================
Domain: case
================================================================================
-"""
- print
- print ">>> a.show('case')"
- print
+""")
+ print()
+ print(">>> a.show('case')")
+ print()
a.show('case')
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/misc/paradigm.py
# Print a hierarchy
- print """
+ print("""
================================================================================
Hierarchy: case/gender
================================================================================
-"""
- print
- print ">>> a.show('case/gender')"
- print
+""")
+ print()
+ print(">>> a.show('case/gender')")
+ print()
a.show('case/gender')
# Print a table
- print """
+ print("""
================================================================================
Table: table(case/number,gender,content)
================================================================================
-"""
- print
- print ">>> a.setOutput('demo.html')"
- print
+""")
+ print()
+ print(">>> a.setOutput('demo.html')")
+ print()
a.setOutput('demo.html')
- print
- print ">>> a.setFormat('html')"
- print
+ print()
+ print(">>> a.setFormat('html')")
+ print()
a.setFormat('html')
- print
- print ">>> a.show('table(case/number,gender,content)')"
- print
+ print()
+ print(">>> a.show('table(case/number,gender,content)')")
+ print()
a.show('table(case/number,gender,content)')
# Some space
- print
+ print()
if __name__ == '__main__':
demo()
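The hunks above come from 2to3's print and import fixers: Python 2 print statements become print() calls (trailing commas turn into end=' '), and the implicit relative import of paradigmquery gains the explicit leading dot that Python 3 requires. For reference, a minimal sketch of what the build loop is invoking per file; the lib2to3 call is an assumption based on the stock library API, not something taken from this log:

    # Shell form used above: 2to3 -w -n FILE
    #   -w  write the refactored source back to the file
    #   -n  do not leave .bak backups behind
    # Programmatic equivalent (assumes the standard lib2to3 package):
    from lib2to3.main import main

    # Returns a process-style exit code, 0 on success.
    rc = main("lib2to3.fixes", ["-w", "-n", "paradigm.py"])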
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/misc/marshalbrill.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/misc/marshalbrill.py
--- ../python3/nltk_contrib/nltk_contrib/misc/marshalbrill.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/misc/marshalbrill.py (refactored)
@@ -187,7 +187,7 @@
rule.
@rtype: C{list} of C{int}
"""
- return self.apply_at(tokens, range(len(tokens)))
+ return self.apply_at(tokens, list(range(len(tokens))))
def apply_at(self, tokens, positions):
"""
@@ -373,7 +373,7 @@
# Needs to include extract_property in order to distinguish subclasses
# A nicer way would be welcome.
return hash( (self._original, self._replacement, self._conditions,
- self.extract_property.func_code) )
+ self.extract_property.__code__) )
def __repr__(self):
conditions = ' and '.join(['%s in %d...%d' % (v,s,e)
@@ -456,7 +456,7 @@
C{Brill} training algorithms to generate candidate rules.
"""
def __init__(self):
- raise AssertionError, "BrillTemplateI is an abstract interface"
+ raise AssertionError("BrillTemplateI is an abstract interface")
def applicable_rules(self, tokens, i, correctTag):
"""
@@ -478,7 +478,7 @@
@type correctTag: (any)
@rtype: C{list} of L{BrillRuleI}
"""
- raise AssertionError, "BrillTemplateI is an abstract interface"
+ raise AssertionError("BrillTemplateI is an abstract interface")
def get_neighborhood(self, token, index):
"""
@@ -494,7 +494,7 @@
@type index: C{int}
@rtype: C{Set}
"""
- raise AssertionError, "BrillTemplateI is an abstract interface"
+ raise AssertionError("BrillTemplateI is an abstract interface")
class ProximateTokensTemplate(BrillTemplateI):
"""
@@ -671,8 +671,8 @@
@param min_score: The minimum acceptable net error reduction
that each transformation must produce in the corpus.
"""
- if self._trace > 0: print ("Training Brill tagger on %d tokens..." %
- len(train_tokens))
+ if self._trace > 0: print(("Training Brill tagger on %d tokens..." %
+ len(train_tokens)))
# Create a new copy of the training token, and run the initial
# tagger on this. We will progressively update this test
@@ -691,7 +691,7 @@
train_tokens)
if rule is None or score < min_score:
if self._trace > 1:
- print 'Insufficient improvement; stopping'
+ print('Insufficient improvement; stopping')
break
else:
# Add the rule to our list of rules.
@@ -746,7 +746,7 @@
# once for each tag that the rule changes to an incorrect
# value.
score = fixscore
- if correct_indices.has_key(rule.original_tag()):
+ if rule.original_tag() in correct_indices:
for i in correct_indices[rule.original_tag()]:
if rule.applies(test_tokens, i):
score -= 1
@@ -791,7 +791,7 @@
# Convert the dictionary into a list of (rule, score) tuples,
# sorted in descending order of score.
- rule_score_items = rule_score_dict.items()
+ rule_score_items = list(rule_score_dict.items())
temp = [(-score, rule) for (rule, score) in rule_score_items]
temp.sort()
return [(rule, -negscore) for (negscore, rule) in temp]
@@ -818,7 +818,7 @@
#////////////////////////////////////////////////////////////
def _trace_header(self):
- print """
+ print("""
B |
S F r O | Score = Fixed - Broken
c i o t | R Fixed = num tags changed incorrect -> correct
@@ -826,13 +826,13 @@
r e e e | l Other = num tags changed incorrect -> incorrect
e d n r | e
------------------+-------------------------------------------------------
- """.rstrip()
+ """.rstrip())
def _trace_rule(self, rule, score, fixscore, numchanges):
if self._trace > 2:
- print ('%4d%4d%4d%4d ' % (score, fixscore, fixscore-score,
- numchanges-fixscore*2+score)), '|',
- print rule
+ print(('%4d%4d%4d%4d ' % (score, fixscore, fixscore-score,
+ numchanges-fixscore*2+score)), '|', end=' ')
+ print(rule)
######################################################################
## Fast Brill Tagger Trainer
@@ -899,7 +899,7 @@
# If the rule is already known to apply here, ignore.
# (This only happens if the position's tag hasn't changed.)
- if positionsByRule[rule].has_key(i):
+ if i in positionsByRule[rule]:
return
if rule.replacement_tag() == train_tokens[i][1]:
@@ -912,7 +912,7 @@
# Update rules in the other dictionaries
del rulesByScore[ruleScores[rule]][rule]
ruleScores[rule] += positionsByRule[rule][i]
- if not rulesByScore.has_key(ruleScores[rule]):
+ if ruleScores[rule] not in rulesByScore:
rulesByScore[ruleScores[rule]] = {}
rulesByScore[ruleScores[rule]][rule] = None
rulesByPosition[i].add(rule)
@@ -922,7 +922,7 @@
def _updateRuleNotApplies (rule, i):
del rulesByScore[ruleScores[rule]][rule]
ruleScores[rule] -= positionsByRule[rule][i]
- if not rulesByScore.has_key(ruleScores[rule]):
+ if ruleScores[rule] not in rulesByScore:
rulesByScore[ruleScores[rule]] = {}
rulesByScore[ruleScores[rule]][rule] = None
@@ -939,22 +939,22 @@
tag = tagged_tokens[i][1]
if tag != train_tokens[i][1]:
errorIndices.append(i)
- if not tagIndices.has_key(tag):
+ if tag not in tagIndices:
tagIndices[tag] = []
tagIndices[tag].append(i)
- print "Finding useful rules..."
+ print("Finding useful rules...")
# Collect all rules that fix any errors, with their positive scores.
for i in errorIndices:
for template in self._templates:
# Find the templated rules that could fix the error.
for rule in template.applicable_rules(tagged_tokens, i,
train_tokens[i][1]):
- if not positionsByRule.has_key(rule):
+ if rule not in positionsByRule:
_initRule(rule)
_updateRuleApplies(rule, i)
- print "Done initializing %i useful rules." %len(positionsByRule)
+ print("Done initializing %i useful rules." %len(positionsByRule))
if TESTING:
after = -1 # bug-check only
@@ -973,7 +973,7 @@
# best rule.
bestRule = None
- bestRules = rulesByScore[maxScore].keys()
+ bestRules = list(rulesByScore[maxScore].keys())
for rule in bestRules:
# Find the first relevant index at or following the first
@@ -990,7 +990,7 @@
# If we checked all remaining indices and found no more errors:
if ruleScores[rule] == maxScore:
firstUnknownIndex[rule] = len(tagged_tokens) # i.e., we checked them all
- print "%i) %s (score: %i)" %(len(rules)+1, rule, maxScore)
+ print("%i) %s (score: %i)" %(len(rules)+1, rule, maxScore))
bestRule = rule
break
@@ -1002,29 +1002,29 @@
# bug-check only
if TESTING:
before = len(_errorPositions(tagged_tokens, train_tokens))
- print "There are %i errors before applying this rule." %before
+ print("There are %i errors before applying this rule." %before)
assert after == -1 or before == after, \
"after=%i but before=%i" %(after,before)
- print "Applying best rule at %i locations..." \
- %len(positionsByRule[bestRule].keys())
+ print("Applying best rule at %i locations..." \
+ %len(list(positionsByRule[bestRule].keys())))
# If we reach this point, we've found a new best rule.
# Apply the rule at the relevant sites.
# (apply_at is a little inefficient here, since we know the rule applies
# and don't actually need to test it again.)
rules.append(bestRule)
- bestRule.apply_at(tagged_tokens, positionsByRule[bestRule].keys())
+ bestRule.apply_at(tagged_tokens, list(positionsByRule[bestRule].keys()))
# Update the tag index accordingly.
- for i in positionsByRule[bestRule].keys(): # where it applied
+ for i in list(positionsByRule[bestRule].keys()): # where it applied
# Update positions of tags
# First, find and delete the index for i from the old tag.
oldIndex = bisect.bisect_left(tagIndices[bestRule.original_tag()], i)
del tagIndices[bestRule.original_tag()][oldIndex]
# Then, insert i into the index list of the new tag.
- if not tagIndices.has_key(bestRule.replacement_tag()):
+ if bestRule.replacement_tag() not in tagIndices:
tagIndices[bestRule.replacement_tag()] = []
newIndex = bisect.bisect_left(tagIndices[bestRule.replacement_tag()], i)
tagIndices[bestRule.replacement_tag()].insert(newIndex, i)
@@ -1037,11 +1037,11 @@
#
# If a template now generates a different set of rules, we have
# to update our indices to reflect that.
- print "Updating neighborhoods of changed sites.\n"
+ print("Updating neighborhoods of changed sites.\n")
# First, collect all the indices that might get new rules.
neighbors = set()
- for i in positionsByRule[bestRule].keys(): # sites changed
+ for i in list(positionsByRule[bestRule].keys()): # sites changed
for template in self._templates:
neighbors.update(template.get_neighborhood(tagged_tokens, i))
@@ -1062,21 +1062,21 @@
# Update rules only now generated by this template
for newRule in siteRules - rulesByPosition[i]:
d += 1
- if not positionsByRule.has_key(newRule):
+ if newRule not in positionsByRule:
e += 1
_initRule(newRule) # make a new rule w/score=0
_updateRuleApplies(newRule, i) # increment score, etc.
if TESTING:
after = before - maxScore
- print "%i obsolete rule applications, %i new ones, " %(c,d)+ \
- "using %i previously-unseen rules." %e
+ print("%i obsolete rule applications, %i new ones, " %(c,d)+ \
+ "using %i previously-unseen rules." %e)
maxScore = max(rulesByScore.keys()) # may have gone up
- if self._trace > 0: print ("Training Brill tagger on %d tokens..." %
- len(train_tokens))
+ if self._trace > 0: print(("Training Brill tagger on %d tokens..." %
+ len(train_tokens)))
# Maintain a list of the rules that apply at each position.
rules_by_position = [{} for tok in train_tokens]
@@ -1164,7 +1164,7 @@
# train is the proportion of data used in training; the rest is reserved
# for testing.
- print "Loading tagged data..."
+ print("Loading tagged data...")
sents = []
for item in treebank.items:
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/misc/marshalbrill.py
sents.extend(treebank.tagged(item))
@@ -1182,13 +1182,13 @@
# Unigram tagger
- print "Training unigram tagger:",
+ print("Training unigram tagger:", end=' ')
u = tag.Unigram(backoff=NN_CD_tagger)
# NB training and testing are required to use a list-of-lists structure,
# so we wrap the flattened corpus data with the extra list structure.
u.train([training_data])
- print("[accuracy: %f]" % tag.accuracy(u, [gold_data]))
+ print(("[accuracy: %f]" % tag.accuracy(u, [gold_data])))
# Brill tagger
@@ -1209,13 +1209,13 @@
trainer = brill.BrillTrainer(u, templates, trace)
b = trainer.train(training_data, max_rules, min_score)
- print
- print("Brill accuracy: %f" % tag.accuracy(b, [gold_data]))
+ print()
+ print(("Brill accuracy: %f" % tag.accuracy(b, [gold_data])))
print("\nRules: ")
printRules = file(rule_output, 'w')
for rule in b.rules():
- print(str(rule))
+ print((str(rule)))
printRules.write(str(rule)+"\n\n")
testing_data = list(b.tag(testing_data))
@@ -1225,7 +1225,7 @@
for e in el:
errorFile.write(e+"\n\n")
errorFile.close()
- print "Done; rules and errors saved to %s and %s." % (rule_output, error_output)
+ print("Done; rules and errors saved to %s and %s." % (rule_output, error_output))
if __name__ == '__main__':
demo()
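Beyond the print fixer, two removals show up in this file: dict.has_key() is gone in Python 3, so 2to3 rewrites every has_key test as an in membership check, and the func_code attribute used in __hash__ is renamed __code__. A small illustration of both replacements (the names here are illustrative, not from marshalbrill.py):

    d = {"NN": 3, "CD": 1}
    # Python 2: d.has_key("NN")  ->  Python 3:
    assert "NN" in d                    # membership test replaces has_key()

    def tagger():
        return "NN"
    # Python 2: tagger.func_code  ->  Python 3:
    assert tagger.__code__.co_name == "tagger"   # function attribute was renamed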
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/misc/marshal.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/misc/marshal.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/misc/marshal.py
--- ../python3/nltk_contrib/nltk_contrib/misc/marshal.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/misc/marshal.py (refactored)
@@ -56,7 +56,7 @@
"""
handler = file(filename, "w")
- for text, tag in self._model.iteritems():
+ for text, tag in self._model.items():
handler.write("%s:%s\n" % (text, tag))
handler.close()
@@ -97,7 +97,7 @@
handler.write("length %i\n" % self._length)
handler.write("minlength %i\n" % self._minlength)
- for text, tag in self._model.iteritems():
+ for text, tag in self._model.items():
handler.write("%s:%s\n" % (text, tag))
handler.close()
@@ -203,4 +203,4 @@
#tagger.marshal("ngram.test")
tagger.unmarshal("ngram.test")
- print tagger._model
+ print(tagger._model)
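Note what 2to3 leaves alone here: the handler = file(filename, "w") calls a few lines up are untouched, but the file() builtin no longer exists in Python 3, so marshal.py will still fail with a NameError after conversion. A hedged sketch of the manual follow-up fix:

    # file(filename, "w") must become open(filename, "w"); a context
    # manager also guarantees the handle is closed:
    model = {"cat": "NN", "runs": "VBZ"}          # illustrative data
    with open("ngram.test", "w") as handler:
        for text, tag in model.items():           # items() is fine in Python 3
            handler.write("%s:%s\n" % (text, tag))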
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/misc/lex.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/misc/lex.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/misc/lex.py
--- ../python3/nltk_contrib/nltk_contrib/misc/lex.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/misc/lex.py (refactored)
@@ -31,7 +31,7 @@
"""
Output 'phon' values in 'stem + affix' notation.
"""
- return dumper.represent_scalar(u'!phon', u'%s + %s' % \
+ return dumper.represent_scalar('!phon', '%s + %s' % \
(data['stem'], data['affix']))
yaml.add_representer(Phon, phon_representer)
@@ -61,7 +61,7 @@
stem, affix = [normalize(s) for s in value.split('+')]
return Phon(stem, affix)
-yaml.add_constructor(u'!phon', phon_constructor)
+yaml.add_constructor('!phon', phon_constructor)
#following causes YAML to barf for some reason:
#pattern = re.compile(r'^(\?)?\w+\s*\+\s*(\?)?\w+$')
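The only change 2to3 makes in lex.py is dropping the u'' prefixes: every Python 3 str is already unicode, so the explicit prefix on the YAML tag and format string is redundant. A self-contained sketch of the same PyYAML representer pattern (assumes PyYAML is installed; the !pair tag and tuple payload are illustrative, not the module's own):

    import yaml

    def pair_representer(dumper, data):
        # Emit a 2-tuple as a custom '!pair' scalar, mirroring phon_representer.
        return dumper.represent_scalar('!pair', '%s + %s' % data)

    yaml.add_representer(tuple, pair_representer)
    print(yaml.dump(("stem", "affix")))    # -> !pair 'stem + affix'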
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/misc/langid.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/misc/langid.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/misc/langid.py
--- ../python3/nltk_contrib/nltk_contrib/misc/langid.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/misc/langid.py (refactored)
@@ -25,7 +25,7 @@
cls = classifier.get_class(gold_data[lang])
if cls == lang:
correct += 1
- print correct, "in", len(gold_data), "correct"
+ print(correct, "in", len(gold_data), "correct")
# features: character bigrams
fd = detect.feature({"char-bigrams" : lambda t: [string.join(t)[n:n+2] for n in range(len(t)-1)]})
@@ -36,11 +36,11 @@
gold_data[lang] = training_data[lang][:50]
training_data[lang] = training_data[lang][100:200]
-print "Cosine classifier: ",
+print("Cosine classifier: ", end=' ')
run(classify.Cosine(fd), training_data, gold_data)
-print "Naivebayes classifier: ",
+print("Naivebayes classifier: ", end=' ')
run(classify.NaiveBayes(fd), training_data, gold_data)
-print "Spearman classifier: ",
+print("Spearman classifier: ", end=' ')
run(classify.Spearman(fd), training_data, gold_data)
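The trailing-comma prints become print(..., end=' ') so each classifier label still shares a line with run()'s output. But 2to3 does not touch string.join() in the char-bigrams lambda above, and the string module lost its join function in Python 3, so that line needs a manual rewrite along these lines (the function name is illustrative):

    def char_bigrams(tokens):
        s = ' '.join(tokens)      # str.join replaces string.join(t)
        return [s[n:n+2] for n in range(len(s) - 1)]

    print("Cosine classifier: ", end=' ')     # trailing comma became end=' '
    print(char_bigrams(["der", "hund"]))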
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/misc/kimmo.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/misc/kimmo.py
--- ../python3/nltk_contrib/nltk_contrib/misc/kimmo.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/misc/kimmo.py (refactored)
@@ -16,7 +16,7 @@
# TODO: remove Unix dependencies
-import Tkinter
+import tkinter
import os, re, sys, types, string, glob, time, md5
from nltk_contrib.fsa import *
@@ -37,7 +37,7 @@
and we want batch mode, big file, or big input test with output.
"""
###########################################################################
-from ScrolledText import ScrolledText
+from tkinter.scrolledtext import ScrolledText
class KimmoGUI:
def __init__(self, grammar, text, title='Kimmo Interface v1.78'):
@@ -58,46 +58,46 @@
self.helpFilename = 'kimmo.help'
- self._root = Tkinter.Tk()
+ self._root = tkinter.Tk()
self._root.title(title)
- ctlbuttons = Tkinter.Frame(self._root)
+ ctlbuttons = tkinter.Frame(self._root)
ctlbuttons.pack(side='top', fill='x')
- level1 = Tkinter.Frame(self._root)
+ level1 = tkinter.Frame(self._root)
level1.pack(side='top', fill='none')
- Tkinter.Frame(self._root).pack(side='top', fill='none')
- level2 = Tkinter.Frame(self._root)
+ tkinter.Frame(self._root).pack(side='top', fill='none')
+ level2 = tkinter.Frame(self._root)
level2.pack(side='top', fill='x')
- buttons = Tkinter.Frame(self._root)
+ buttons = tkinter.Frame(self._root)
buttons.pack(side='top', fill='none')
- batchFrame = Tkinter.Frame(self._root)
+ batchFrame = tkinter.Frame(self._root)
batchFrame.pack(side='top', fill='x')
- self.batchpath = Tkinter.StringVar()
- Tkinter.Label(batchFrame, text="Batch File:").pack(side='left')
- Tkinter.Entry(batchFrame, background='white', foreground='black',
+ self.batchpath = tkinter.StringVar()
+ tkinter.Label(batchFrame, text="Batch File:").pack(side='left')
+ tkinter.Entry(batchFrame, background='white', foreground='black',
width=30, textvariable=self.batchpath).pack(side='left')
- Tkinter.Button(batchFrame, text='Go!',
+ tkinter.Button(batchFrame, text='Go!',
background='#a0c0c0', foreground='black',
command=self.batch).pack(side='left')
- self.debugWin = Tkinter.StringVar() # change to a window and field eventually.
- Tkinter.Entry(batchFrame, background='grey', foreground='red',
+ self.debugWin = tkinter.StringVar() # change to a window and field eventually.
+ tkinter.Entry(batchFrame, background='grey', foreground='red',
width=30, textvariable=self.debugWin).pack(side='right')
- self.wordIn = Tkinter.StringVar()
- Tkinter.Label(level2, text="Generate or Recognize:").pack(side='left')
- Tkinter.Entry(level2, background='white', foreground='black',
+ self.wordIn = tkinter.StringVar()
+ tkinter.Label(level2, text="Generate or Recognize:").pack(side='left')
+ tkinter.Entry(level2, background='white', foreground='black',
width=30, textvariable=self.wordIn).pack(side='left')
- lexiconFrame = Tkinter.Frame(level1)
- Tkinter.Label(lexiconFrame, text="Lexicon & Alternations").pack(side='top',
+ lexiconFrame = tkinter.Frame(level1)
+ tkinter.Label(lexiconFrame, text="Lexicon & Alternations").pack(side='top',
fill='x')
self.lexicon = ScrolledText(lexiconFrame, background='white',
foreground='black', width=50, height=36, wrap='none')
# setup the scrollbar
- scroll = Tkinter.Scrollbar(lexiconFrame, orient='horizontal',command=self.lexicon.xview)
+ scroll = tkinter.Scrollbar(lexiconFrame, orient='horizontal',command=self.lexicon.xview)
scroll.pack(side='bottom', fill='x')
self.lexicon.configure(xscrollcommand = scroll.set)
@@ -105,36 +105,36 @@
self.lexicon.pack(side='top')
- midFrame = Tkinter.Frame(level1)
- rulesFrame = Tkinter.Frame(midFrame)
+ midFrame = tkinter.Frame(level1)
+ rulesFrame = tkinter.Frame(midFrame)
rulesFrame.pack(side='top', fill='x')
- Tkinter.Label(rulesFrame, text="Rules/Subsets").pack(side='top',
+ tkinter.Label(rulesFrame, text="Rules/Subsets").pack(side='top',
fill='x')
self.rules = ScrolledText(rulesFrame, background='white',
foreground='black', width=60, height=19, wrap='none')
# setup the scrollbar
- scroll = Tkinter.Scrollbar(rulesFrame, orient='horizontal',command=self.rules.xview)
+ scroll = tkinter.Scrollbar(rulesFrame, orient='horizontal',command=self.rules.xview)
scroll.pack(side='bottom', fill='x')
self.rules.configure(xscrollcommand = scroll.set)
self.rules.pack(side='top')
- midbetweenFrame = Tkinter.Frame(midFrame)
+ midbetweenFrame = tkinter.Frame(midFrame)
midbetweenFrame.pack(side='top', fill='x')
- Tkinter.Button(midbetweenFrame, text='clear',
+ tkinter.Button(midbetweenFrame, text='clear',
background='#f0f0f0', foreground='black',
- command= lambda start=1.0, end=Tkinter.END : self.results.delete(start,end)
+ command= lambda start=1.0, end=tkinter.END : self.results.delete(start,end)
).pack(side='right')
- Tkinter.Label(midbetweenFrame,
+ tkinter.Label(midbetweenFrame,
text="Results ").pack(side='right')
self.results = ScrolledText(midFrame, background='white',
foreground='black', width=60, height=13, wrap='none')
# setup the scrollbar
- scroll = Tkinter.Scrollbar(midFrame, orient='horizontal',command=self.results.xview)
+ scroll = tkinter.Scrollbar(midFrame, orient='horizontal',command=self.results.xview)
scroll.pack(side='bottom', fill='x')
self.results.configure(xscrollcommand = scroll.set)
@@ -151,13 +151,13 @@
self.alternation.pack(side='top')
"""
- Tkinter.Button(ctlbuttons, text='Quit',
+ tkinter.Button(ctlbuttons, text='Quit',
background='#a0c0c0', foreground='black',
command=self.destroy).pack(side='left')
- self.loadMenuButton = Tkinter.Menubutton(ctlbuttons, text='Load', background='#a0c0c0', foreground='black', relief='raised')
+ self.loadMenuButton = tkinter.Menubutton(ctlbuttons, text='Load', background='#a0c0c0', foreground='black', relief='raised')
self.loadMenuButton.pack(side='left')
- self.loadMenu=Tkinter.Menu(self.loadMenuButton,tearoff=0)
+ self.loadMenu=tkinter.Menu(self.loadMenuButton,tearoff=0)
self.loadMenu.add_command(label='Load Lexicon', underline=0,command = lambda filetype='.lex', targetWindow = self.lexicon, tf = 'l' : self.loadTypetoTarget(filetype, targetWindow, tf))
self.loadMenu.add_command(label='Load Rules', underline=0,command = lambda filetype='.rul', targetWindow = self.rules, tf = 'r' : self.loadTypetoTarget(filetype, targetWindow, tf))
@@ -166,9 +166,9 @@
#
- self.saveMenuButton = Tkinter.Menubutton(ctlbuttons, text='Save',background='#a0c0c0', foreground='black', relief='raised')
+ self.saveMenuButton = tkinter.Menubutton(ctlbuttons, text='Save',background='#a0c0c0', foreground='black', relief='raised')
self.saveMenuButton.pack(side='left')
- self.saveMenu=Tkinter.Menu(self.saveMenuButton,tearoff=0)
+ self.saveMenu=tkinter.Menu(self.saveMenuButton,tearoff=0)
self.saveMenu.add_command(label='Save Lexicon', underline=0,command = lambda filename=self.lexfilename, sourceWindow = self.lexicon : self.writeToFilefromWindow(filename, sourceWindow,'w',0,'l'))
self.saveMenu.add_command(label='Save Rules', underline=0,command = lambda filename=self.rulfilename, sourceWindow = self.rules : self.writeToFilefromWindow(filename, sourceWindow,'w',0,'r'))
self.saveMenu.add_command(label='Save Results', underline=0,command = lambda filename='.results', sourceWindow = self.results : self.writeToFilefromWindow(filename, sourceWindow,'w',0))
@@ -176,12 +176,12 @@
self.saveMenuButton["menu"]=self.saveMenu
- Tkinter.Label(ctlbuttons, text=" Preset:").pack(side='left')
-
- self.configValue = Tkinter.StringVar()
- self.configsMenuButton = Tkinter.Menubutton(ctlbuttons, text='Configs', background='#a0c0c0', foreground='black', relief='raised')
+ tkinter.Label(ctlbuttons, text=" Preset:").pack(side='left')
+
+ self.configValue = tkinter.StringVar()
+ self.configsMenuButton = tkinter.Menubutton(ctlbuttons, text='Configs', background='#a0c0c0', foreground='black', relief='raised')
self.configsMenuButton.pack(side='left')
- self.configsMenu=Tkinter.Menu(self.configsMenuButton,tearoff=0)
+ self.configsMenu=tkinter.Menu(self.configsMenuButton,tearoff=0)
# read the directory for cfgs, add them to the menu
# add path expander, to expand ~ & given home dirs.
@@ -210,21 +210,21 @@
# background='#b0f0d0', foreground='#008b45',
# command=self.generate).pack(side='right')
- self.tracingbtn = Tkinter.Button(ctlbuttons, text='Tracing',
+ self.tracingbtn = tkinter.Button(ctlbuttons, text='Tracing',
background='#fff0f0', foreground='black',
command=lambda : self.create_destroyDebugTracing()).pack(side='right')
- self.graphMenuButton = Tkinter.Menubutton(ctlbuttons, text='Graph', background='#d0d0e8', foreground='black', relief='raised')
+ self.graphMenuButton = tkinter.Menubutton(ctlbuttons, text='Graph', background='#d0d0e8', foreground='black', relief='raised')
self.graphMenuButton.pack(side='right')
- self.graphMenu=Tkinter.Menu(self.graphMenuButton,tearoff=0)
+ self.graphMenu=tkinter.Menu(self.graphMenuButton,tearoff=0)
self.graphMenu.add_command(label='Graph Lexicon', underline=0,command = lambda which = 'l' : self.graph(which))
self.graphMenu.add_command(label='Graph FSA Rules', underline=0,command = lambda which = 'r' : self.graph(which))
# self.loadMenu.add_command(label='Load Lexicon', underline=0,command = lambda filetype='.lex', targetWindow = self.lexicon : loadTypetoTarget(self, filetype, targetWindow))
self.graphMenuButton["menu"]=self.graphMenu
- self.helpbtn = Tkinter.Button(ctlbuttons, text='Help',
+ self.helpbtn = tkinter.Button(ctlbuttons, text='Help',
background='#f0fff0', foreground='black',
command=self.kimmoHelp).pack(side='right')
@@ -233,10 +233,10 @@
midFrame.pack(side='left')
# alternationFrame.pack(side='left')
- Tkinter.Button(level2, text='Generate',
+ tkinter.Button(level2, text='Generate',
background='#a0c0c0', foreground='black',
command=self.generate).pack(side='left')
- Tkinter.Button(level2, text='Recognize',
+ tkinter.Button(level2, text='Recognize',
background='#a0c0c0', foreground='black',
command=self.recognize).pack(side='left')
@@ -267,16 +267,16 @@
# Enter mainloop.
- Tkinter.mainloop()
+ tkinter.mainloop()
except:
- print 'Error creating Tree View'
+ print('Error creating Tree View')
self.destroy()
raise
def init_menubar(self):
- menubar = Tkinter.Menu(self._root)
-
- filemenu = Tkinter.Menu(menubar, tearoff=0)
+ menubar = tkinter.Menu(self._root)
+
+ filemenu = tkinter.Menu(menubar, tearoff=0)
filemenu.add_command(label='Save Rules', underline=0,
command=self.save, accelerator='Ctrl-s')
self._root.bind('<Control-s>', self.save)
@@ -308,26 +308,26 @@
else:
try:
# have in its own special di decial class
- self.dbgTracing = Tkinter.Toplevel()
+ self.dbgTracing = tkinter.Toplevel()
self.dbgTracing.title("Tracing/Debug")
- dbgTraceFrame2 = Tkinter.Frame(self.dbgTracing)
+ dbgTraceFrame2 = tkinter.Frame(self.dbgTracing)
dbgTraceFrame2.pack(side='top', fill='x')
- dbgTraceFrame = Tkinter.Frame(self.dbgTracing)
+ dbgTraceFrame = tkinter.Frame(self.dbgTracing)
dbgTraceFrame.pack(side='top', fill='x',expand='yes')
self.traceWindow = ScrolledText(dbgTraceFrame, background='#f4f4f4',
foreground='#aa0000', width=45, height=24, wrap='none')
- Tkinter.Button(dbgTraceFrame2, text='clear',
+ tkinter.Button(dbgTraceFrame2, text='clear',
background='#a0c0c0', foreground='black',
- command= lambda start=1.0, end=Tkinter.END : self.traceWindow.delete(start,end)
+ command= lambda start=1.0, end=tkinter.END : self.traceWindow.delete(start,end)
).pack(side='right')
- Tkinter.Button(dbgTraceFrame2, text='Save',
+ tkinter.Button(dbgTraceFrame2, text='Save',
background='#a0c0c0', foreground='black',
command= lambda file=self.kimmoResultFile,windowName=self.traceWindow,mode='w',auto=0 : self.writeToFilefromWindow(file,windowName,mode,auto)
).pack(side='left')
- scroll = Tkinter.Scrollbar(dbgTraceFrame, orient='horizontal',command=self.traceWindow.xview)
+ scroll = tkinter.Scrollbar(dbgTraceFrame, orient='horizontal',command=self.traceWindow.xview)
scroll.pack(side='bottom', fill='x')
self.traceWindow.configure(xscrollcommand = scroll.set)
@@ -340,7 +340,7 @@
self.dbgTracing.protocol("WM_DELETE_WINDOW", self.create_destroyDebugTracing)
except:
- print 'Error creating Tree View'
+ print('Error creating Tree View')
self.dbgTracing.destroy()
self.dbgTracing = None
self.debug = False
@@ -355,7 +355,7 @@
if not (auto and windowName and filename):
- from tkFileDialog import asksaveasfilename
+ from tkinter.filedialog import asksaveasfilename
ftypes = [('Text file', '.txt'),('Rule file', '.rul'),('Lexicon file', '.lex'),('Alternations file', '.alt'),
('All files', '*')]
filename = asksaveasfilename(filetypes=ftypes,
@@ -365,7 +365,7 @@
self.guiError('Need File Name')
return
f = open(filename, 'w')
- f.write(windowName.get(1.0,Tkinter.END))
+ f.write(windowName.get(1.0,tkinter.END))
f.close()
if filename:
@@ -401,7 +401,7 @@
"""
def configLoader(self,*args):
- print args[0]
+ print(args[0])
filename = args[0]
# if arg is a valid file, load by line.
@@ -471,7 +471,7 @@
text.append(line)
# empty the window now that the file was valid
- windowField.delete(1.0, Tkinter.END)
+ windowField.delete(1.0, tkinter.END)
windowField.insert(1.0, '\n'.join(text))
@@ -483,7 +483,7 @@
if not (fileType and targetWindow): return
- from tkFileDialog import askopenfilename
+ from tkinter.filedialog import askopenfilename
ftypes = [(fileType, fileType)]
filename = askopenfilename(filetypes=ftypes, defaultextension=fileType)
@@ -502,7 +502,7 @@
# graphical interface to file loading.
"Load rule/lexicon set from a text file"
- from tkFileDialog import askopenfilename
+ from tkinter.filedialog import askopenfilename
ftypes = [('Text file', '.txt'),
('All files', '*')]
# filename = askopenfilename(filetypes=ftypes, defaultextension='.txt')
@@ -556,10 +556,10 @@
def clear(self, *args):
"Clears the grammar and lexical and sentence inputs"
- self.lexicon.delete(1.0, Tkinter.END)
- self.rules.delete(1.0, Tkinter.END)
+ self.lexicon.delete(1.0, tkinter.END)
+ self.rules.delete(1.0, tkinter.END)
# self.alternation.delete(1.0, Tkinter.END)
- self.results.delete(1.0, Tkinter.END)
+ self.results.delete(1.0, tkinter.END)
def destroy(self, *args):
if self._root is None: return
@@ -570,10 +570,10 @@
# for single stepping through a trace.
# need to make the kimmo class capable of being interrupted & resumed.
def step(self, *args):
- print 'a'
+ print('a')
def singlestep(self, *args):
- print 'a'
+ print('a')
def batch(self, *args):
filename = self.batchpath.get()
@@ -704,10 +704,10 @@
# check & set path, if necessary, need read and write access to path
path = ''
pathstatus = os.stat('./') # 0600 is r/w, binary evaluation
- if not ((pathstatus[0] & 0600) == 0600):
+ if not ((pathstatus[0] & 0o600) == 0o600):
path = '/tmp/' + str(os.environ.get("USER")) + '/' # need terminating /
if not os.path.exists(path):
- os.mkdir(path,0777)
+ os.mkdir(path,0o777)
pathre = re.compile(r"^.*\/")
@@ -779,7 +779,7 @@
matchIdx = '1.0'
matchRight = '1.0'
while matchIdx != '':
- matchIdx = window.search(word,matchRight,count=1,stopindex=Tkinter.END)
+ matchIdx = window.search(word,matchRight,count=1,stopindex=tkinter.END)
if matchIdx == '': break
strptr = matchIdx.split(".")
@@ -799,11 +799,11 @@
or recognize. (i.e. loading all rules, lexicon, and alternations
"""
# only initialize Kimmo if the contents of the *rules* have changed
- tmprmd5 = md5.new(self.rules.get(1.0, Tkinter.END))
- tmplmd5 = md5.new(self.lexicon.get(1.0, Tkinter.END))
+ tmprmd5 = md5.new(self.rules.get(1.0, tkinter.END))
+ tmplmd5 = md5.new(self.lexicon.get(1.0, tkinter.END))
if (not self.kimmoinstance) or (self.rulemd5 != tmprmd5) or (self.lexmd5 != tmplmd5):
self.guiError("Creating new Kimmo instance")
- self.kimmoinstance = KimmoControl(self.lexicon.get(1.0, Tkinter.END),self.rules.get(1.0, Tkinter.END),'','',self.debug)
+ self.kimmoinstance = KimmoControl(self.lexicon.get(1.0, tkinter.END),self.rules.get(1.0, tkinter.END),'','',self.debug)
self.guiError("")
self.rulemd5 = tmprmd5
self.lexmd5 = tmplmd5
@@ -820,7 +820,7 @@
def refresh(self, *args):
if self._root is None: return
- print self.wordIn.get()
+ print(self.wordIn.get())
# CAPTURE PYTHON-KIMMO OUTPUT
@@ -830,8 +830,8 @@
# if there is a trace/debug window
if self.dbgTracing:
- self.traceWindow.insert(Tkinter.END, string.join(args," "))
- self.traceWindow.see(Tkinter.END)
+ self.traceWindow.insert(tkinter.END, string.join(args," "))
+ self.traceWindow.see(tkinter.END)
# otherwise, just drop the output.
@@ -858,7 +858,7 @@
# helpText = Tkinter.StringVar()
helpText = ''
try: f = open(self.helpFilename, 'r')
- except IOError, e:
+ except IOError as e:
self.guiError("HelpFile not loaded")
return
@@ -873,7 +873,7 @@
helpText = re.sub("\r","",helpText)
- helpWindow = Tkinter.Toplevel()
+ helpWindow = tkinter.Toplevel()
helpWindow.title("PyKimmo Documentation & Help")
# help = Tkinter.Label(helpWindow,textvariable=helpText, justify='left' ) #
@@ -884,14 +884,14 @@
help.pack(side='top')
help.insert(1.0, helpText)
# setup the scrollbar
- scroll = Tkinter.Scrollbar(helpWindow, orient='horizontal',command=help.xview)
+ scroll = tkinter.Scrollbar(helpWindow, orient='horizontal',command=help.xview)
scroll.pack(side='bottom', fill='x')
help.configure(xscrollcommand = scroll.set)
# now highlight up the file
- matchIdx = Tkinter.END
- matchRight = Tkinter.END
- matchLen = Tkinter.IntVar()
+ matchIdx = tkinter.END
+ matchRight = tkinter.END
+ matchLen = tkinter.IntVar()
tagId = 1
while 1:
matchIdx = help.search(r"::[^\n]*::",matchIdx, stopindex=1.0, backwards=True, regexp=True, count=matchLen )
@@ -900,7 +900,7 @@
matchIdxFields = matchIdx.split(".")
matchLenStr = matchIdxFields[0] + "." + str(string.atoi(matchIdxFields[1],10) + matchLen.get())
- print (matchIdx, matchLenStr)
+ print((matchIdx, matchLenStr))
help.tag_add(tagId, matchIdx, matchLenStr )
help.tag_configure(tagId, background='aquamarine', foreground='blue', underline=True)
tagId += 1
@@ -974,11 +974,11 @@
class tkImageView:
def __init__(self, imagefileName, title):
- self._root = Tkinter.Toplevel()
+ self._root = tkinter.Toplevel()
self._root.title(title + ' (' + imagefileName + ')')
- self.image = Tkinter.PhotoImage("LGraph",file=imagefileName)
-
- Tkinter.Label(self._root, image=self.image).pack(side='top',fill='x')
+ self.image = tkinter.PhotoImage("LGraph",file=imagefileName)
+
+ tkinter.Label(self._root, image=self.image).pack(side='top',fill='x')
# self._root.mainloop()
def destroy(self, *args):
@@ -989,11 +989,11 @@
######################### Dialog Boxes ##############################
-class ListDialog(Tkinter.Toplevel):
+class ListDialog(tkinter.Toplevel):
def __init__(self, parent, listOptions, title = None):
- Tkinter.Toplevel.__init__(self, parent)
+ tkinter.Toplevel.__init__(self, parent)
self.transient(parent)
if title:
@@ -1003,13 +1003,13 @@
self.result = None
- body = Tkinter.Frame(self)
+ body = tkinter.Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
- box = Tkinter.Frame(self)
- Tkinter.Label(box,text="Select an FSA to graph").pack(side='top',fill='x')
+ box = tkinter.Frame(self)
+ tkinter.Label(box,text="Select an FSA to graph").pack(side='top',fill='x')
box.pack()
@@ -1043,13 +1043,13 @@
def listbox(self, listOptions):
- box = Tkinter.Frame(self)
- self.lb = Tkinter.Listbox(box,height=len(listOptions),width=30,background='#f0f0ff', selectbackground='#c0e0ff'
+ box = tkinter.Frame(self)
+ self.lb = tkinter.Listbox(box,height=len(listOptions),width=30,background='#f0f0ff', selectbackground='#c0e0ff'
,selectmode='single')
self.lb.pack()
for x in listOptions:
- self.lb.insert(Tkinter.END,x)
+ self.lb.insert(tkinter.END,x)
box.pack()
@@ -1057,11 +1057,11 @@
# add standard button box. override if you don't want the
# standard buttons
- box = Tkinter.Frame(self)
-
- w = Tkinter.Button(box, text="OK", width=10, command=self.ok, default="active")
+ box = tkinter.Frame(self)
+
+ w = tkinter.Button(box, text="OK", width=10, command=self.ok, default="active")
w.pack(side="left", padx=5, pady=5)
- w = Tkinter.Button(box, text="Cancel", width=10, command=self.cancel)
+ w = tkinter.Button(box, text="Cancel", width=10, command=self.cancel)
w.pack(side="left", padx=5, pady=5)
self.bind("<Return>", self.ok)
@@ -1245,15 +1245,15 @@
self.s = KimmoRuleSet(self.ksubsets, self.kdefaults, self.krules)
self.s.debug = debug
self.ok = 1
- except RuntimeError, e:
+ except RuntimeError as e:
self.errors = ('Caught:' + str(e) + ' ' + self.errors)
- print 'Caught:', e
- print "Setup of the kimmoinstance failed. Most likely cause"
- print "is infinite recursion due to self-referential lexicon"
- print "For instance:"
- print "Begin: Begin Noun End"
- print "Begin is pointing to itself. Simple example, but check"
- print "to insure no directed loops"
+ print('Caught:', e)
+ print("Setup of the kimmoinstance failed. Most likely cause")
+ print("is infinite recursion due to self-referential lexicon")
+ print("For instance:")
+ print("Begin: Begin Noun End")
+ print("Begin is pointing to itself. Simple example, but check")
+ print("to insure no directed loops")
self.ok = 0
@@ -1313,8 +1313,8 @@
results_string += (batch_result_str)
# place a separator between results
- print '----- '+ time.strftime("%a, %d %b %Y %I:%M %p", time.gmtime()) +' -----\n'
- print results_string
+ print('----- '+ time.strftime("%a, %d %b %Y %I:%M %p", time.gmtime()) +' -----\n')
+ print(results_string)
@@ -2213,7 +2213,7 @@
def name(self): return self._name
def pairs(self): return self._pairs
def start(self): return self._state_descriptions[0][0]
- def is_state(self, index): return self.transitions.has_key(index)
+ def is_state(self, index): return index in self.transitions
def contains_final(self, indices):
@@ -2283,7 +2283,7 @@
# print 'any state match'
# {col num, next state num (0 if fail), is final state}
# if transition row is valid
- if self.transitions.has_key(self.transitions[index][i]): ft = self.is_final[self.transitions[index][i]]
+ if self.transitions[index][i] in self.transitions: ft = self.is_final[self.transitions[index][i]]
else : ft = ''
any_next_states_ary.append([ i, self.transitions[index][i], ft, pair.__repr__() ] )
if not any_next_state:
@@ -2297,7 +2297,7 @@
# times? (i.e. our state is already in next_state
next_state_isset = 1
next_state = self.transitions[index][i]
- if self.transitions.has_key(next_state):
+ if next_state in self.transitions:
if not(next_state in next_states):
next_states.append(next_state)
@@ -2349,12 +2349,12 @@
for w in words:
if len(w.letters()) <= word_position: continue
fc = w.letters()[word_position]
- if first_chars.has_key(fc):
+ if fc in first_chars:
first_chars[fc].append(w)
else:
first_chars[fc] = [ w ]
sub_tries = []
- for c, sub_words in first_chars.items():
+ for c, sub_words in list(first_chars.items()):
sub_tries.append( (c, self.build_trie(sub_words, word_position+1)) )
return ( [w for w in words if len(w.letters()) == word_position], sub_tries )
@@ -2410,12 +2410,12 @@
# print 'current alternation: ' + name
if name == None:
return []
- elif self.alternations.has_key(name):
+ elif name in self.alternations:
result = []
for ln in self.alternations[name].lexicon_names():
result.extend(self._collect(ln))
return result
- elif self.lexicons.has_key(name):
+ elif name in self.lexicons:
return [ self.lexicons[name] ]
else:
# raise ValueError('no lexicon or alternation named ' + name)
@@ -2502,21 +2502,21 @@
padstring = ''
for x in range(position): padstring = padstring + ' '
- print '%s%d %s:%s \n' % (padstring, position, this_input, this_output),
- print '%s%d: Input: ' % (padstring, position,),
+ print('%s%d %s:%s \n' % (padstring, position, this_input, this_output), end=' ')
+ print('%s%d: Input: ' % (padstring, position,), end=' ')
for i in input:
- print ' ' + i + ' ',
+ print(' ' + i + ' ', end=' ')
if this_input:
- print '[' + this_input + ']...',
- print
-
-
- print '%s%d> Output: ' % (padstring, position,),
+ print('[' + this_input + ']...', end=' ')
+ print()
+
+
+ print('%s%d> Output: ' % (padstring, position,), end=' ')
for o in output:
- print ' ' + o + ' ',
+ print(' ' + o + ' ', end=' ')
if this_output:
- print '<' + this_output + '>...',
- print
+ print('<' + this_output + '>...', end=' ')
+ print()
# for (start, rule, fsa_states, required_truth_value) in rule_states:
@@ -2524,7 +2524,7 @@
if False: # morphological_state:
- print ' possible input chars = %s' % invert.possible_next_characters(morphological_state)
+ print(' possible input chars = %s' % invert.possible_next_characters(morphological_state))
# print morphological_state
@@ -2548,7 +2548,7 @@
if ((position >= len(input_tokens)) ): # and (not morphological_state)
- if (self.debug) : print ' AT END OF WORD'
+ if (self.debug) : print(' AT END OF WORD')
# FOR RECOGNIZER
# this will yield some words twice, not all
# also, recognizer is failing to put on the added information like "+genetive"
@@ -2596,16 +2596,16 @@
if (required_truth_value != truth_value):
if (self.debug):
- print ' BLOCKED by rule {%d %s %s}' % (start, rule, required_truth_value)
- print fsa_states
+ print(' BLOCKED by rule {%d %s %s}' % (start, rule, required_truth_value))
+ print(fsa_states)
break
else:
if 0: # (self.debug):
- print ' passed rule {%d %s %s}' % (start, rule, required_truth_value)
+ print(' passed rule {%d %s %s}' % (start, rule, required_truth_value))
else:
if (self.debug):
- print ' SUCCESS!'
+ print(' SUCCESS!')
yield result_str, result_words
else:
if morphological_state: # recognizer; get the next possible surface chars that can result in
@@ -2666,7 +2666,7 @@
break
else:
if (0): # (self.debug):
- print ' passed rule {%d %s %s}' % (start, rule, required_truth_value)
+ print(' passed rule {%d %s %s}' % (start, rule, required_truth_value))
elif (len(next_fsa_state_set) == 0):
# if it isn't true, then it will have to fail, bcs we are at
# the end of the state set.
@@ -2676,15 +2676,15 @@
break
else:
if (0): # (self.debug):
- print ' passed rule {%d %s %s}' % (start, rule, required_truth_value)
+ print(' passed rule {%d %s %s}' % (start, rule, required_truth_value))
else:
next_rule_states.append( (start, rule, next_fsa_state_set, required_truth_value) )
- if (self.debug) : print rule_state_debug
+ if (self.debug) : print(rule_state_debug)
if (fail):
if (self.debug):
- print ' BLOCKED by rule %s' % (fail,)
+ print(' BLOCKED by rule %s' % (fail,))
continue
@@ -2703,7 +2703,7 @@
if (rule.rightFSA()):
if (self.debug):
- print ' adding rule {%d %s %s}' % (position, rule, required_truth_value)
+ print(' adding rule {%d %s %s}' % (position, rule, required_truth_value))
next_rule_states.append( (position, rule, [ rule.rightFSA().start() ], required_truth_value) )
else:
if (required_truth_value == False):
@@ -2711,7 +2711,7 @@
continue
else:
if (0): # (self.debug):
- print ' passed rule ' + str(rule)
+ print(' passed rule ' + str(rule))
# if did not fail, call recursively on next chars
if (fail == None):
@@ -2748,7 +2748,7 @@
yield o
else:
if (self.debug):
- print ' BLOCKED by rule ' + str(fail)
+ print(' BLOCKED by rule ' + str(fail))
def _initial_rule_states(self):
return [ (0, rule, [ rule.start() ], True) for rule in self.rules() if isinstance(rule, KimmoFSARule)]
@@ -2771,9 +2771,9 @@
if not morphology_state:
- print "Bad Morphological State, failing recognition"
+ print("Bad Morphological State, failing recognition")
return
- if (self.debug) : print 'recognize: ' + input_tokens
+ if (self.debug) : print('recognize: ' + input_tokens)
# print output_words
for o in self._generate(input_tokens, 0, self._initial_rule_states(), morphology_state, [], [], '',
output_words, invert):
@@ -2828,18 +2828,18 @@
path = os.path.expanduser(filename)
try:
f = open(path, 'r')
- except IOError, e:
+ except IOError as e:
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/misc/kimmo.py
RefactoringTool: Warnings/messages while refactoring:
RefactoringTool: ### In file ../python3/nltk_contrib/nltk_contrib/misc/kimmo.py ###
RefactoringTool: Line 963: could not convert: raise "Dummy"
RefactoringTool: Python 3 does not support string exceptions
path = find_corpus_file("kimmo", filename)
try:
f = open(path, 'r')
- except IOError, e:
+ except IOError as e:
if gui:
gui.guiError(str(e))
else:
- print str(e)
- print "FAILURE"
+ print(str(e))
+ print("FAILURE")
return ""
- print "Loaded:", path
+ print("Loaded:", path)
return f
# MAIN
@@ -2866,20 +2866,20 @@
elif x == "debug": console_debug = 1
- print 'Tips:'
- print 'kimmo.cfg is loaded by default, so if you name your project that, '
- print "it will be loaded at startup\n"
-
- print 'For commandline operation:'
- print ' (for instance if you want to use a different editor)'
- print "To Recognize:"
- print " % python kimmo.py english.lex english.rul -r:cats"
- print "To Generate:"
- print " % python kimmo.py english.lex english.rul -g:cat+s"
- print "To Batch Test:"
- print " % python kimmo.py english.lex english.rul english.batch_test"
- print "With Debug and Tracing:"
- print " % python kimmo.py english.lex english.rul -r:cats debug\n"
+ print('Tips:')
+ print('kimmo.cfg is loaded by default, so if you name your project that, ')
+ print("it will be loaded at startup\n")
+
+ print('For commandline operation:')
+ print(' (for instance if you want to use a different editor)')
+ print("To Recognize:")
+ print(" % python kimmo.py english.lex english.rul -r:cats")
+ print("To Generate:")
+ print(" % python kimmo.py english.lex english.rul -g:cat+s")
+ print("To Batch Test:")
+ print(" % python kimmo.py english.lex english.rul english.batch_test")
+ print("With Debug and Tracing:")
+ print(" % python kimmo.py english.lex english.rul -r:cats debug\n")
# print filename_lex
@@ -2894,17 +2894,17 @@
# creation failed, stop
if not kimmoinstance.ok :
- print kimmoinstance.errors
+ print(kimmoinstance.errors)
sys.exit()
if recognize_string:
recognize_results = kimmoinstance.recognize(recognize_string)
- print recognize_results
+ print(recognize_results)
if generate_string:
generate_results = kimmoinstance.generate(generate_string)
- print generate_results # remember to format
+ print(generate_results) # remember to format
if filename_batch_test: # run a batch
kimmoinstance.batch(filename_batch_test)
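The warning embedded in this file's output is the one place 2to3 gives up: raise "Dummy" on line 963 is a Python 2 string exception, which has no Python 3 equivalent, so the file still needs a hand edit. The md5 module imported at the top of kimmo.py is likewise gone in Python 3, and 2to3 does not rewrite it. A sketch of both manual fixes; DummyError is an invented name:

    class DummyError(Exception):
        """Replacement for the old string exception raise "Dummy"."""

    def check(ok):
        if not ok:
            raise DummyError("Dummy")

    # md5.new(text)  ->  hashlib.md5(bytes); note the bytes requirement:
    import hashlib
    digest = hashlib.md5("rules text".encode("utf-8")).hexdigest()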
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/misc/huffman.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/misc/huffman.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/misc/huffman.py
--- ../python3/nltk_contrib/nltk_contrib/misc/huffman.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/misc/huffman.py (refactored)
@@ -5,7 +5,7 @@
from operator import itemgetter
def huffman_tree(text):
- coding = nltk.FreqDist(text).items()
+ coding = list(nltk.FreqDist(text).items())
coding.sort(key=itemgetter(1))
while len(coding) > 1:
a, b = coding[:2]
@@ -67,8 +67,8 @@
text_len = len(text)
comp_len = len(encode(code_tree, text)) / 8.0
compression = (text_len - comp_len) / text_len
- print compression,
- print
+ print(compression, end=' ')
+ print()
trial(train1, [test1, test2, test3])
trial(train2, [test1, test2, test3])
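The list(...) wrapper 2to3 adds around nltk.FreqDist(text).items() exists because items() returns a view in Python 3, and views have no sort() method. Idiomatic Python 3 would fold the two lines into sorted(); a sketch, not the author's code:

    from operator import itemgetter

    freqs = {"a": 5, "b": 2, "r": 2, "c": 1}   # stands in for nltk.FreqDist(text)
    coding = sorted(freqs.items(), key=itemgetter(1))   # no list()+sort() pair needed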
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/misc/fsa.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/misc/fsa.py
--- ../python3/nltk_contrib/nltk_contrib/misc/fsa.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/misc/fsa.py (refactored)
@@ -64,8 +64,8 @@
A generator that yields each transition arrow in the FSA in the form
(source, label, target).
"""
- for (state, map) in self._transitions.items():
- for (symbol, targets) in map.items():
+ for (state, map) in list(self._transitions.items()):
+ for (symbol, targets) in list(map.items()):
for target in targets:
yield (state, symbol, target)
@@ -74,7 +74,7 @@
A generator for all possible labels taking state s1 to state s2.
"""
map = self._transitions.get(s1, {})
- for (symbol, targets) in map.items():
+ for (symbol, targets) in list(map.items()):
if s2 in targets: yield symbol
def sigma(self):
@@ -127,7 +127,7 @@
@returns: a list of all states in the FSA.
@rtype: list
"""
- return self._transitions.keys()
+ return list(self._transitions.keys())
def add_final(self, state):
"""
@@ -177,9 +177,9 @@
@param s2: the destination of the transition
"""
if s1 not in self.states():
- raise ValueError, "State %s does not exist" % s1
+ raise ValueError("State %s does not exist" % s1)
if s2 not in self.states():
- raise ValueError, "State %s does not exist" % s1
+ raise ValueError("State %s does not exist" % s1)
self._add_transition(self._transitions, s1, label, s2)
self._add_transition(self._reverse, s2, label, s1)
@@ -203,16 +203,16 @@
@param s2: the destination of the transition
"""
if s1 not in self.states():
- raise ValueError, "State %s does not exist" % s1
+ raise ValueError("State %s does not exist" % s1)
if s2 not in self.states():
- raise ValueError, "State %s does not exist" % s1
+ raise ValueError("State %s does not exist" % s1)
self._del_transition(self._transitions, s1, label, s2)
self._del_transition(self._reverse, s2, label, s1)
def delete_state(self, state):
"Removes a state and all its transitions from the FSA."
if state not in self.states():
- raise ValueError, "State %s does not exist" % state
+ raise ValueError("State %s does not exist" % state)
for (s1, label, s2) in self.incident_transitions(state):
self.delete(s1, label, s2)
del self._transitions[state]
@@ -226,10 +226,10 @@
result = set()
forward = self._transitions[state]
backward = self._reverse[state]
- for label, targets in forward.items():
+ for label, targets in list(forward.items()):
for target in targets:
result.add((state, label, target))
- for label, targets in backward.items():
+ for label, targets in list(backward.items()):
for target in targets:
result.add((target, label, state))
return result
@@ -239,9 +239,9 @@
Assigns a state a new identifier.
"""
if old not in self.states():
- raise ValueError, "State %s does not exist" % old
+ raise ValueError("State %s does not exist" % old)
if new in self.states():
- raise ValueError, "State %s already exists" % new
+ raise ValueError("State %s already exists" % new)
changes = []
for (s1, symbol, s2) in self.generate_transitions():
if s1 == old and s2 == old:
@@ -274,8 +274,8 @@
Return whether this is a DFA
(every symbol leads from a state to at most one target state).
"""
- for map in self._transitions.values():
- for targets in map.values():
+ for map in list(self._transitions.values()):
+ for targets in list(map.values()):
if len(targets) > 1: return False
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/misc/fsa.py
return True
@@ -287,14 +287,14 @@
"""
next = self.next(state, symbol)
if len(next) > 1:
- raise ValueError, "This FSA is nondeterministic -- use nextStates instead."
+ raise ValueError("This FSA is nondeterministic -- use nextStates instead.")
elif len(next) == 1: return list(next)[0]
else: return None
def forward_traverse(self, state):
"All states reachable by following transitions from a given state."
result = set()
- for (symbol, targets) in self._transitions[state].items():
+ for (symbol, targets) in list(self._transitions[state].items()):
result = result.union(targets)
return result
@@ -302,7 +302,7 @@
"""All states from which a given state is reachable by following
transitions."""
result = set()
- for (symbol, targets) in self._reverse[state].items():
+ for (symbol, targets) in list(self._reverse[state].items()):
result = result.union(targets)
return result
@@ -334,7 +334,7 @@
self._clean_map(self._reverse[state])
def _clean_map(self, map):
- for (key, value) in map.items():
+ for (key, value) in list(map.items()):
if len(value) == 0:
del map[key]
@@ -396,7 +396,7 @@
for label in self.sigma():
nfa_next = tuple(self.e_closure(self.move(map[dfa_state],
label)))
- if map.has_key(nfa_next):
+ if nfa_next in map:
dfa_next = map[nfa_next]
else:
dfa_next = dfa.new_state()
@@ -412,7 +412,7 @@
"Generate all accepting sequences of length at most maxlen."
if maxlen > 0:
if state in self._finals:
- print prefix
+ print(prefix)
for (s1, labels, s2) in self.outgoing_transitions(state):
for label in labels():
self.generate(maxlen-1, s2, prefix+label)
@@ -421,14 +421,14 @@
"""
Print a representation of this FSA (in human-readable YAML format).
"""
- print yaml.dump(self)
+ print(yaml.dump(self))
@classmethod
def from_yaml(cls, loader, node):
map = loader.construct_mapping(node)
result = cls(map.get('sigma', []), {}, map.get('finals', []))
- for (s1, map1) in map['transitions'].items():
- for (symbol, targets) in map1.items():
+ for (s1, map1) in list(map['transitions'].items()):
+ for (symbol, targets) in list(map1.items()):
for s2 in targets:
result.insert(s1, symbol, s2)
return result
@@ -551,19 +551,19 @@
# Use a regular expression to initialize the FSA.
re = 'abcd'
- print 'Regular Expression:', re
+ print('Regular Expression:', re)
re2nfa(fsa, re)
- print "NFA:"
+ print("NFA:")
fsa.pp()
# Convert the (nondeterministic) FSA to a deterministic FSA.
dfa = fsa.dfa()
- print "DFA:"
+ print("DFA:")
dfa.pp()
# Prune the DFA
dfa.prune()
- print "PRUNED DFA:"
+ print("PRUNED DFA:")
dfa.pp()
# Use the FSA to generate all strings of length less than 3
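Aside (illustrative sketch, not part of the build output above): the fsa.py hunks are dominated by three standard fixers, fix_raise, fix_print, and fix_has_key. A minimal, self-contained Python 3 rendering of the resulting idioms; the lookup() helper and its table are hypothetical, not taken from nltk_contrib:

# Post-2to3 idioms shown in the fsa.py diff (illustration only).
def lookup(table, state):
    if state not in table:                    # was: table.has_key(state)
        raise ValueError("State %s does not exist" % state)  # was: raise ValueError, "..."
    return table[state]

table = {'q0': {'a': set(['q1'])}}
print(lookup(table, 'q0'))                    # was: print lookup(table, 'q0')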
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/misc/didyoumean.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/misc/didyoumean.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/misc/didyoumean.py
--- ../python3/nltk_contrib/nltk_contrib/misc/didyoumean.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/misc/didyoumean.py (refactored)
@@ -31,7 +31,7 @@
def test(self, token):
hashed = self.specialhash(token)
if hashed in self.learned:
- words = self.learned[hashed].items()
+ words = list(self.learned[hashed].items())
sortby(words, 1, reverse=1)
if token in [i[0] for i in words]:
return 'This word seems OK'
@@ -59,7 +59,7 @@
d.learn()
# choice of words to be relevant related to the brown corpus
for i in "birdd, oklaoma, emphasise, bird, carot".split(", "):
- print i, "-", d.test(i)
+ print(i, "-", d.test(i))
if __name__ == "__main__":
demo()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/misc/annotationgraph.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/misc/annotationgraph.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/misc/annotationgraph.py
--- ../python3/nltk_contrib/nltk_contrib/misc/annotationgraph.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/misc/annotationgraph.py (refactored)
@@ -12,7 +12,7 @@
def __init__(self, t):
self._edges = []
self._len = len(t.leaves())
- self._nodes = range(self._len)
+ self._nodes = list(range(self._len))
self._convert(t, 0)
self._index = Index((start, (end, label)) for (start, end, label) in self._edges)
@@ -75,7 +75,7 @@
t = Tree(s)
ag = AnnotationGraph(t)
for p in ag.pas2([]):
- print p
+ print(p)
if __name__ == '__main__':
demo()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/misc/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No files need to be modified.
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/treecanvasview.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/treecanvasview.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/treecanvasview.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/treecanvasview.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/treecanvasview.py (refactored)
@@ -1,10 +1,10 @@
from qt import *
from qtcanvas import *
-from treecanvasnode import *
-from nodefeaturedialog import *
-from translator import translate
-from axis import *
-import lpath
+from .treecanvasnode import *
+from .nodefeaturedialog import *
+from .translator import translate
+from .axis import *
+from . import lpath
import math
class FilterExpressionPopup(QLabel):
@@ -59,7 +59,7 @@
s,ans = QInputDialog.getText('Edit Filter Expression','Enter new filter expression',
QLineEdit.Normal,self.text(),self)
if ans:
- s = unicode(s).strip()
+ s = str(s).strip()
if s:
self.node.filterExpression = s
else:
@@ -132,7 +132,7 @@
s,ans = QInputDialog.getText('New Filter Expression','Enter filter expression',
QLineEdit.Normal,s,self)
if ans:
- s = unicode(s).strip()
+ s = str(s).strip()
if s:
if lpath.translate("//A[%s]"%s) is None:
QMessageBox.critical(self,"Error","Invalid filter expression.")
@@ -147,7 +147,7 @@
s,ans = QInputDialog.getText('Edit Label','Enter new label',
QLineEdit.Normal,item.node.label,self)
if ans:
- s = unicode(s).strip()
+ s = str(s).strip()
if s:
if 'originalLabel' not in item.node.data:
item.node.data['originalLabel'] = item.node.label
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/treecanvasnode.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/treecanvasnode.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/treecanvasnode.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/treecanvasnode.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/treecanvasnode.py (refactored)
@@ -1,6 +1,6 @@
from qt import *
from qtcanvas import *
-from lpathtree_qt import *
+from .lpathtree_qt import *
class Point:
def __init__(self, *args):
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/treecanvas.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/treecanvas.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/treecanvas.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/treecanvas.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/treecanvas.py (refactored)
@@ -1,6 +1,6 @@
from qtcanvas import *
from qt import *
-from treecanvasnode import *
+from .treecanvasnode import *
__all__ = ["TreeCanvas"]
@@ -105,7 +105,7 @@
item = node.gui
item2 = node.parent.gui
coords = item.connectingLine(item2)
- apply(node.line.setPoints, coords)
+ node.line.setPoints(*coords)
node.show()
self.collapse(self._data)
@@ -143,7 +143,7 @@
line = QCanvasLine(self)
line.setPen(pen)
node.line = line
- apply(line.setPoints, coords)
+ line.setPoints(*coords)
node.show()
self._w = self._width[self._data]
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/translator.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/translator.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/translator.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/translator.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/translator.py (refactored)
@@ -1,5 +1,5 @@
-from StringIO import StringIO
-import at_lite as at
+from io import StringIO
+from . import at_lite as at
__all__ = ["translate", "translate_sub"]
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/sqlviewdialog.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/sqlviewdialog.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/sqlviewdialog.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/sqlviewdialog.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/sqlviewdialog.py (refactored)
@@ -1,5 +1,5 @@
from qt import *
-import lpath
+from . import lpath
class SqlViewDialog(QDialog):
def __init__(self, lpql=None, parent=None, name=None,
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/qba.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/qba.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/qba.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/qba.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/qba.py (refactored)
@@ -2,17 +2,17 @@
import os
from qt import *
from qtcanvas import *
-from treecanvas import *
-from treecanvasview import *
-from lpathtree_qt import *
-from axis import *
-from db import *
-from dbdialog import *
-from sqlviewdialog import *
-from overlay import *
-from translator import translate
-from parselpath import parse_lpath
-from lpath import tokenize
+from .treecanvas import *
+from .treecanvasview import *
+from .lpathtree_qt import *
+from .axis import *
+from .db import *
+from .dbdialog import *
+from .sqlviewdialog import *
+from .overlay import *
+from .translator import translate
+from .parselpath import parse_lpath
+from .lpath import tokenize
class QBA(QMainWindow):
def __init__(self, tree=None):
@@ -171,7 +171,7 @@
"XPM (*.xpm)")
if d.exec_loop() == QDialog.Rejected: return
filenam = d.selectedFile()
- filenam = unicode(filenam)
+ filenam = str(filenam)
self._saveImageDir = os.path.dirname(filenam)
if os.path.exists(filenam):
res = QMessageBox.question(
@@ -262,7 +262,7 @@
app.setMainWidget(w)
if len(sys.argv) == 2:
generator = LPathTreeModel.importTreebank(file(sys.argv[1]))
- w.setTree(generator.next())
+ w.setTree(next(generator))
w.show()
w.setCaption('LPath QBA') # this is only necessary on windows
app.exec_loop()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/parselpath.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/parselpath.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/parselpath.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/parselpath.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/parselpath.py (refactored)
@@ -1,6 +1,6 @@
-from lpath import tokenize
-from lpathtree import LPathTreeModel
-from translator import translate
+from .lpath import tokenize
+from .lpathtree import LPathTreeModel
+from .translator import translate
SCOPE = ['{','}']
BRANCH = ['[',']']
@@ -129,17 +129,17 @@
def f(t, n):
if t is not None:
- print (" "*n) + t.data['label']
+ print((" "*n) + t.data['label'])
for c in t.children:
f(c, n+4)
def g(t, n):
if t is not None:
- print (" "*n) + t.data['label']
+ print((" "*n) + t.data['label'])
for c in t.lpChildren:
g(c, n+4)
else:
- print " "*n + "None"
+ print(" "*n + "None")
g(t,0)
- print translate(t)
+ print(translate(t))
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/overlay.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/overlay.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/overlay.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/overlay.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/overlay.py (refactored)
@@ -1,5 +1,5 @@
import re
-from translator import translate_sub
+from .translator import translate_sub
__all__ = ["find_overlays", "Overlay"];
@@ -138,7 +138,7 @@
M = []
for match in TAB:
- m = match.items()
+ m = list(match.items())
m.sort()
L = []
for sym,tup in m:
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/nodefeaturedialog.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/nodefeaturedialog.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/nodefeaturedialog.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/nodefeaturedialog.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/nodefeaturedialog.py (refactored)
@@ -1,6 +1,6 @@
from qt import *
-from at_lite import TableModel, TableEdit
-import lpath
+from .at_lite import TableModel, TableEdit
+from . import lpath
class NodeFeatureDialog(QDialog):
def __init__(self, node, parent):
@@ -8,7 +8,7 @@
self.setCaption('Node Attribute Dialog')
self.resize(320,240)
- tab = TableModel([("Name",unicode),("Value",unicode)])
+ tab = TableModel([("Name",str),("Value",str)])
tab.insertRow(None, ['label',node.data['label']])
if '@func' in node.data:
for v in node.data['@func']:
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/lpathtree_qt.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/lpathtree_qt.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/lpathtree_qt.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/lpathtree_qt.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/lpathtree_qt.py (refactored)
@@ -1,5 +1,5 @@
-from lpathtree import LPathTreeModel as PureLPathTree
-from axis import *
+from .lpathtree import LPathTreeModel as PureLPathTree
+from .axis import *
from qt import QObject
__all__ = ['LPathTreeModel']
@@ -55,7 +55,7 @@
self.axis = cls(self.gui.canvas())
self.axis.target = target
self.axis.root = root
- apply(self.axis.setPoints, coords)
+ self.axis.setPoints(*coords)
if self.getNot():
self.axis.setHeadType(Axis.HeadNegation)
elif not self.lpOnMainTrunk():
@@ -71,7 +71,7 @@
node.axis.target = node
#coords = node.gui.connectingLine(self.gui)
coords = self.gui.connectingLine(node.gui)
- apply(node.axis.setPoints, coords)
+ node.axis.setPoints(*coords)
if node.getNot():
node.axis.setHeadType(Axis.HeadNegation)
elif not node.lpOnMainTrunk():
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/lpathtree.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/lpathtree.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/lpathtree.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/lpathtree.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/lpathtree.py (refactored)
@@ -1,5 +1,5 @@
-import at_lite as at
-from at_lite.tree import TreeModel as PureTree
+from . import at_lite as at
+from .at_lite.tree import TreeModel as PureTree
__all__ = ['LPathTreeModel']
@@ -380,7 +380,7 @@
L = []
if self.lpScope is not None:
def f(node):
- if node.lpScope == self.lpScope and filter(node):
+ if node.lpScope == self.lpScope and list(filter(node)):
L.append(node)
self.root.dfs(f)
return L
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/lpath/tb2tbl.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/lpath/tb2tbl.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/lpath/tb2tbl.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/lpath/tb2tbl.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/lpath/tb2tbl.py (refactored)
@@ -13,7 +13,7 @@
#conn.begin()
#cursor.execute("begin")
for r in tree.exportLPathTable(TableModel,a,b):
- print r
+ print(r)
cursor.execute(SQL1, tuple(r))
#cursor.execute("commit")
conn.commit()
@@ -25,8 +25,8 @@
conn = PgSQL.connect(
host=opts.host, port=opts.port, database=opts.db,
user=opts.user, password=opts.passwd)
- except PgSQL.libpq.DatabaseError, e:
- print e
+ except PgSQL.libpq.DatabaseError as e:
+ print(e)
sys.exit(1)
return conn
elif opts.servertype == 'oracle':
@@ -43,12 +43,12 @@
try:
conn = MySQLdb.connect(host=opts.host, port=opts.port, db=opts.db,
user=opts.user, passwd=opts.passwd)
- except DatabaseError, e:
- print e
+ except DatabaseError as e:
+ print(e)
sys.exit(1)
return conn
- except ImportError, e:
- print e
+ except ImportError as e:
+ print(e)
sys.exit(1)
def limit(servertype, sql, num):
@@ -154,7 +154,7 @@
optpar.error("user name is missing")
if opts.passwd is None:
- print "Password:",
+ print("Password:", end=' ')
opts.passwd = getpass()
else:
passwd = opts.passwd
@@ -186,20 +186,20 @@
conn = connectdb(opts)
cursor = conn.cursor()
- print os.path.join('',os.path.dirname(sys.argv[0]))
+ print(os.path.join('',os.path.dirname(sys.argv[0])))
# check if table exists
try:
sql = limit(opts.servertype, "select * from "+opts.table, 1)
cursor.execute(sql)
- except DatabaseError, e:
+ except DatabaseError as e:
if opts.create:
p = os.path.join(os.path.dirname(sys.argv[0]),'lpath-schema.sql')
for line in file(p).read().replace("TABLE",opts.table).split(';'):
if line.strip():
cursor.execute(line)
else:
- print "table %s doesn't exist" % `opts.table`
+ print("table %s doesn't exist" % repr(opts.table))
sys.exit(1)
# set correct table name in the insertion SQL
@@ -232,20 +232,20 @@
reader = codecs.getreader('utf-8')
if tbdir == '-':
for tree in TreeModel.importTreebank(reader(sys.stdin)):
- print tree
+ print(tree)
do(tree)
count -= 1
if count == 0: break
else:
for root, dirs, files in os.walk(tbdir):
for f in files:
- print f,
+ print(f, end=' ')
if filter.match(f):
p = os.path.join(root,f)
for tree in TreeModel.importTreebank(reader(file(p))):
do(tree)
count -= 1
if count == 0: sys.exit(0) # done
- print sid
+ print(sid)
else:
- print 'skipped'
+ print('skipped')
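Aside (illustrative sketch, not part of the build output above): the tb2tbl.py hunks pair fix_except with fix_print: Python 3 spells exception binding with "as" and replaces the trailing-comma print with an explicit end=' '. A minimal runnable example:

try:
    int("not a number")
except ValueError as e:                # was: except ValueError, e
    print("parse failed:", end=' ')    # was: print "parse failed:",
    print(e)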
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/lpath/lpath.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/lpath/lpath.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/lpath/lpath.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/lpath/lpath.py (refactored)
@@ -142,11 +142,11 @@
L = []
for x in self:
if isinstance(x, str):
- L.append(unicode(x))
- elif isinstance(x, unicode):
+ L.append(str(x))
+ elif isinstance(x, str):
L.append(x)
elif isinstance(x, AND) or isinstance(x, OR) or isinstance(x, NOT):
- L.append(unicode(x))
+ L.append(str(x))
elif isinstance(x, flatten):
for e in x:
L.append("%s%s%s" % tuple(e))
@@ -155,7 +155,7 @@
elif isinstance(x, Trans):
L.append("exists (%s)" % x.getSql())
else:
- L.append(unicode(x))
+ L.append(str(x))
L.append(self.joiner)
return "(" + " ".join(L[:-1]) + ")"
@@ -182,7 +182,7 @@
return "not " + str(self.lst)
def __unicode__(self):
- return "not " + unicode(self.lst)
+ return "not " + str(self.lst)
class flatten(list):
@@ -219,7 +219,7 @@
if hasattr(self, k):
eval('self.' + k)
else:
- raise(AttributeError("Step instance has no attribute '%s'" % k))
+ raise AttributeError
class Trans:
@@ -286,7 +286,7 @@
s2 = self.steps[i+1]
self._interpreteAxis(s, s2.axis, s2)
- w = unicode(self.WHERE).strip()
+ w = str(self.WHERE).strip()
if w: sql += "where %s" % w
return sql
@@ -295,7 +295,7 @@
name = "_" + t.node
for c in t:
name += "_"
- if isinstance(c,str) or isinstance(c,unicode):
+ if isinstance(c,str) or isinstance(c,str):
name += self.TR[c]
else:
name += c.node
@@ -357,7 +357,7 @@
[step1.left, "<=", step2.left],
[step1.right, ">=", step2.right],
[step1.depth, "<", step2.depth],
- "not exists (select 1 from %s z where %s)" % (self.tname,unicode(zWHERE))
+ "not exists (select 1 from %s z where %s)" % (self.tname,str(zWHERE))
]
elif step2.conditional == '*':
self.WHERE += [
@@ -366,7 +366,7 @@
AND([step1.left, "<=", step2.left],
[step1.right, ">=", step2.right],
[step1.depth, "<", step2.depth],
- "not exists (select 1 from %s z where %s)" % (self.tname,unicode(zWHERE)))
+ "not exists (select 1 from %s z where %s)" % (self.tname,str(zWHERE)))
))
]
@@ -408,7 +408,7 @@
[step1.left, ">=", step2.left],
[step1.right, "<=", step2.right],
[step1.depth, ">", step2.depth],
- "not exists (select 1 from %s z where %s)" % (self.tname,unicode(zWHERE))
+ "not exists (select 1 from %s z where %s)" % (self.tname,str(zWHERE))
]
elif step2.conditional == '*':
self.WHERE += [
@@ -417,7 +417,7 @@
AND([step1.left, ">=", step2.left],
[step1.right, "<=", step2.right],
[step1.depth, ">", step2.depth],
- "not exists (select 1 from %s z where %s)" % (self.tname,unicode(zWHERE)))
+ "not exists (select 1 from %s z where %s)" % (self.tname,str(zWHERE)))
))
]
@@ -449,8 +449,8 @@
["z.left", ">=", step1.right],
["z.right", "<=", step2.left],
NOT(GRP(flatten(step2.getConstraints()))),
- "not exists (select 1 from %s c where %s)" % (self.tname,unicode(cWHERE)),
- "not exists (select 1 from %s w where %s)" % (self.tname,unicode(wWHERE))
+ "not exists (select 1 from %s c where %s)" % (self.tname,str(cWHERE)),
+ "not exists (select 1 from %s w where %s)" % (self.tname,str(wWHERE))
)
self.WHERE += [
@@ -470,7 +470,7 @@
self.WHERE += [
[step1.right, "<=", step2.left],
flatten(step2.getConstraints()),
- "not exists (select 1 from %s z where %s)" % (self.tname,unicode(zWHERE))
+ "not exists (select 1 from %s z where %s)" % (self.tname,str(zWHERE))
]
elif step2.conditional == '*':
self.WHERE += [
@@ -479,7 +479,7 @@
GRP(AND(
[step1.right, "<=", step2.left],
flatten(step2.getConstraints()),
- "not exists (select 1 from %s z where %s)" % (self.tname,unicode(zWHERE))
+ "not exists (select 1 from %s z where %s)" % (self.tname,str(zWHERE))
))))
]
@@ -511,8 +511,8 @@
["z.left", ">=", step2.right],
["z.right", "<=", step1.left],
NOT(GRP(flatten(step2.getConstraints()))),
- "not exists (select 1 from %s c where %s)" % (self.tname,unicode(cWHERE)),
- "not exists (select 1 from %s w where %s)" % (self.tname,unicode(wWHERE))
+ "not exists (select 1 from %s c where %s)" % (self.tname,str(cWHERE)),
+ "not exists (select 1 from %s w where %s)" % (self.tname,str(wWHERE))
)
self.WHERE += [
@@ -532,7 +532,7 @@
self.WHERE += [
[step1.left, ">=", step2.right],
flatten(step2.getConstraints()),
- "not exists (select 1 from %s z where %s)" % (self.tname,unicode(zWHERE))
+ "not exists (select 1 from %s z where %s)" % (self.tname,str(zWHERE))
]
elif step2.conditional == '*':
self.WHERE += [
@@ -541,7 +541,7 @@
GRP(AND(
[step1.left, ">=", step2.right],
flatten(step2.getConstraints()),
- "not exists (select 1 from %s z where %s)" % (self.tname,unicode(zWHERE))
+ "not exists (select 1 from %s z where %s)" % (self.tname,str(zWHERE))
))))
]
@@ -573,8 +573,8 @@
["z.left", ">=", step1.right],
["z.right", "<=", step2.left],
NOT(GRP(flatten(step2.getConstraints()))),
- "not exists (select 1 from %s c where %s)" % (self.tname,unicode(cWHERE)),
- "not exists (select 1 from %s w where %s)" % (self.tname,unicode(wWHERE))
+ "not exists (select 1 from %s c where %s)" % (self.tname,str(cWHERE)),
+ "not exists (select 1 from %s w where %s)" % (self.tname,str(wWHERE))
)
self.WHERE += [
@@ -596,7 +596,7 @@
[step1.right, "<=", step2.left],
[step1.pid, "=", step2.pid],
flatten(step2.getConstraints()),
- "not exists (select 1 from %s z where %s)" % (self.tname,unicode(zWHERE))
+ "not exists (select 1 from %s z where %s)" % (self.tname,str(zWHERE))
]
elif step2.conditional == '*':
self.WHERE += [
@@ -606,7 +606,7 @@
[step1.right, "<=", step2.left],
[step1.pid, "=", step2.pid],
flatten(step2.getConstraints()),
- "not exists (select 1 from %s z where %s)" % (self.tname,unicode(zWHERE))
+ "not exists (select 1 from %s z where %s)" % (self.tname,str(zWHERE))
))))
]
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/lpath/lpath.py
@@ -638,8 +638,8 @@
["z.left", ">=", step2.right],
["z.right", "<=", step1.left],
NOT(GRP(flatten(step2.getConstraints()))),
- "not exists (select 1 from %s c where %s)" % (self.tname,unicode(cWHERE)),
- "not exists (select 1 from %s w where %s)" % (self.tname,unicode(wWHERE))
+ "not exists (select 1 from %s c where %s)" % (self.tname,str(cWHERE)),
+ "not exists (select 1 from %s w where %s)" % (self.tname,str(wWHERE))
)
self.WHERE += [
@@ -661,7 +661,7 @@
[step1.left, ">=", step2.right],
[step1.pid, "=", step2.pid],
flatten(step2.getConstraints()),
- "not exists (select 1 from %s z where %s)" % (self.tname,unicode(zWHERE))
+ "not exists (select 1 from %s z where %s)" % (self.tname,str(zWHERE))
]
elif step2.conditional == '*':
self.WHERE += [
@@ -671,7 +671,7 @@
[step1.left, ">=", step2.right],
[step1.pid, "=", step2.pid],
flatten(step2.getConstraints()),
- "not exists (select 1 from %s z where %s)" % (self.tname,unicode(zWHERE))
+ "not exists (select 1 from %s z where %s)" % (self.tname,str(zWHERE))
))))
]
@@ -936,7 +936,7 @@
s2 = self.steps[i+1]
self._interpreteAxis(s, s2.axis, s2)
- w = unicode(self.WHERE).strip()
+ w = str(self.WHERE).strip()
if w: sql += "where %s" % w
return sql
@@ -950,7 +950,7 @@
for i,s in enumerate(tr.steps[:-1]):
s2 = tr.steps[i+1]
tr._interpreteAxis(s, s2.axis, s2)
- self.WHERE.append(unicode(tr.WHERE).strip())
+ self.WHERE.append(str(tr.WHERE).strip())
def translate2(q,tname='T'):
global T2, T3, T4, T5, T6, GR
@@ -998,13 +998,13 @@
def print_profile():
- print
- print " python startup: %6.3fs" % (T1-T0)
- print " query tokenization: %6.3fs" % (T3-T2)
- print " grammar parsing: %6.3fs" % (T4-T3)
- print " chart parsing: %6.3fs" % (T5-T4)
- print " translation: %6.3fs" % (T6-T5)
- print
+ print()
+ print(" python startup: %6.3fs" % (T1-T0))
+ print(" query tokenization: %6.3fs" % (T3-T2))
+ print(" grammar parsing: %6.3fs" % (T4-T3))
+ print(" chart parsing: %6.3fs" % (T5-T4))
+ print(" translation: %6.3fs" % (T6-T5))
+ print()
def get_profile():
# tok/grammar/parsing/trans times
@@ -1038,6 +1038,6 @@
#l = tokenize('//VP[{//^V->NP->PP$}]')
#l = tokenize('//A//B//C')
- print translate2(sys.argv[1])[1]
+ print(translate2(sys.argv[1])[1])
print_profile()
#print get_grammar()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/lpath/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/lpath/__init__.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/lpath/__init__.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/lpath/__init__.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/lpath/__init__.py (refactored)
@@ -1 +1 @@
-from lpath import *
+from .lpath import *
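Aside (illustrative sketch, not part of the build output above): these one-line __init__.py hunks are fix_import at work: Python 3 dropped implicit relative imports, so intra-package imports must name the current package explicitly. A hypothetical layout showing both spellings:

# mypkg/__init__.py (hypothetical package, illustration only)
# Python 2 also accepted the implicit relative form:
#     from core import *
# Python 3 requires the explicit relative form:
from .core import *
# Absolute imports (e.g. "import mypkg.core") work unchanged in both.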
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/dbdialog.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/dbdialog.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/dbdialog.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/dbdialog.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/dbdialog.py (refactored)
@@ -1,5 +1,5 @@
from qt import *
-from db import *
+from .db import *
import os
try:
from pyPgSQL import PgSQL
@@ -67,7 +67,7 @@
conn = PgSQL.connect(**conninfo)
conn2 = PgSQL.connect(**conninfo)
return LPathPgSqlDB(conn, conn2, conninfo["user"].ascii())
- except PgSQL.libpq.DatabaseError, e:
+ except PgSQL.libpq.DatabaseError as e:
try:
enc = os.environ['LANG'].split('.')[-1]
msg = e.message.decode(enc)
@@ -111,7 +111,7 @@
try:
conn = cx_Oracle.connect(user+'/'+pw+service)
conn2 = cx_Oracle.connect(user+'/'+pw+service)
- except cx_Oracle.DatabaseError, e:
+ except cx_Oracle.DatabaseError as e:
try:
enc = os.environ['LANG'].split('.')[-1]
msg = e.__str__().decode(enc)
@@ -157,7 +157,7 @@
try:
conn = MySQLdb.connect(**conninfo)
return LPathMySQLDB(conn)
- except MySQLdb.DatabaseError, e:
+ except MySQLdb.DatabaseError as e:
try:
enc = os.environ['LANG'].split('.')[-1]
msg = e.message.decode(enc)
@@ -235,7 +235,7 @@
try:
self.db = self.wstack.visibleWidget().connect()
self.accept()
- except ConnectionError, e:
+ except ConnectionError as e:
QMessageBox.critical(self, "Connection Error",
"Unable to connect to database:\n" + e.__str__())
@@ -276,7 +276,7 @@
def _okClicked(self):
sel = self.listbox.selectedItem()
if sel is not None:
- self.tab = unicode(sel.text())
+ self.tab = str(sel.text())
self.accept()
else:
QMessageBox.critical(self, "Error", "You didn't select a table.")
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/db.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/db.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/db.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/db.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/db.py (refactored)
@@ -4,14 +4,14 @@
import time
from qt import *
from threading import Thread, Lock
-import lpath
-import at_lite as at
+from . import lpath
+from . import at_lite as at
#from pyPgSQL import PgSQL
try:
from sqlite3 import dbapi2 as sqlite
except ImportError:
from pysqlite2 import dbapi2 as sqlite
-from lpathtree_qt import *
+from .lpathtree_qt import *
__all__ = ["LPathDB", "LPathDbI", "LPathPgSqlDB", "LPathOracleDB", "LPathMySQLDB"]
@@ -89,7 +89,7 @@
LPATH_TABLE_HEADER = [
('sid',int),('tid',int),('id',int),('pid',int),
('left',int),('right',int),('depth',int),
- ('type',unicode),('name',unicode),('value',unicode)
+ ('type',str),('name',str),('value',str)
]
EVENT_MORE_TREE = QEvent.User
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/axis.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/axis.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/axis.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/axis.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/axis.py (refactored)
@@ -137,7 +137,7 @@
self._drawNegationHead(painter)
def drawShape(self, painter):
- apply(painter.drawLine, self.points)
+ painter.drawLine(*self.points)
self.drawLineHead(painter)
def toggleHeadType(self):
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/treeio.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/treeio.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/treeio.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/treeio.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/treeio.py (refactored)
@@ -14,10 +14,10 @@
continue
c = n.children
if c:
- s += ' (' + unicode(n.data[p])
+ s += ' (' + str(n.data[p])
L = c + [None] + L[1:]
else:
- s += ' ' + unicode(n.data[p])
+ s += ' ' + str(n.data[p])
L = L[1:]
return s[1:]
@@ -118,7 +118,7 @@
# Make sure all the node's application-specific attributes are recorded.
r['attributes'] = []
if n.data != None:
- for attr, value in n.data.iteritems():
+ for attr, value in n.data.items():
if attr == 'label':
r['name'] = value
else:
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/treeedit_qlistview.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/treeedit_qlistview.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/treeedit_qlistview.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/treeedit_qlistview.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/treeedit_qlistview.py (refactored)
@@ -1,5 +1,5 @@
from qt import QListView, QListViewItem, PYSIGNAL
-from myaccel import AccelKeyHandler
+from .myaccel import AccelKeyHandler
__all__ = ['TreeEdit']
@@ -91,7 +91,7 @@
if self.data is None: return
n = self.data.__class__()
x = [self] + [None] * len(self.col2str)
- item = apply(TreeEditItem,x)
+ item = TreeEditItem(*x)
for sig in ("attach","insertLeft","insertRight","prune","splice"):
n.connect(n,PYSIGNAL(sig),eval("item._%s"%sig))
self.takeItem(item)
@@ -147,7 +147,7 @@
x = [T[-1],n.data[fields[0][0]]]
for f,v in fields[1:]:
x.append(str(n.data[f]))
- e = apply(TreeEditItem, x)
+ e = TreeEditItem(*x)
for sig in ("attach","insertLeft","insertRight","prune","splice"):
n.connect(n,PYSIGNAL(sig),eval("e._%s"%sig))
e.treenode = n
@@ -159,7 +159,7 @@
if __name__ == "__main__":
- from tree_qt import TreeModel
+ from .tree_qt import TreeModel
import qt
class Demo(qt.QVBox):
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tree_qt.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tree_qt.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tree_qt.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tree_qt.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tree_qt.py (refactored)
@@ -1,5 +1,5 @@
from qt import QObject, PYSIGNAL
-from tree import TreeModel as PureTree
+from .tree import TreeModel as PureTree
__all__ = ['TreeModel']
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tree.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tree.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tree.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tree.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tree.py (refactored)
@@ -1,4 +1,4 @@
-from treeio import TreeIo
+from .treeio import TreeIo
__all__ = ['TreeModel']
@@ -149,4 +149,4 @@
s = "(S (NP (N I)) (VP (VP (V saw) (NP (DT the) (N man))) (PP (P with) (NP (DT a) (N telescope)))))"
t = bracket_parse(s)
root = TreeModel.importNltkLiteTree(t)
- print root.treebankString("label")
+ print(root.treebankString("label"))
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableproxy.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableproxy.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableproxy.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableproxy.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableproxy.py (refactored)
@@ -19,7 +19,7 @@
else:
return None
- def next(self):
+ def __next__(self):
if self._limit > self._top:
self._top += 1
return self._stack[self._top]
@@ -149,7 +149,7 @@
def redo(self, n=1):
for m in range(n):
try:
- op, arg1, arg2 = self.undoStack.next()
+ op, arg1, arg2 = next(self.undoStack)
#print "redo", op, arg1, arg2
#print len(self.undoStack._stack)
except TypeError:
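Aside (illustrative sketch, not part of the build output above): the tableproxy.py hunks apply fix_next, which renames the iterator method next() to __next__() and routes call sites through the next() builtin. A minimal sketch of the Python 3 protocol; the Countdown class is hypothetical:

class Countdown:
    def __init__(self, n):
        self.n = n
    def __iter__(self):
        return self
    def __next__(self):        # was: def next(self) under Python 2
        if self.n == 0:
            raise StopIteration
        self.n -= 1
        return self.n + 1

c = Countdown(3)
print(next(c))                 # was: c.next()  -> prints 3
print(list(c))                 # drains the iterator -> [2, 1]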
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableio.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableio.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableio.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableio.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableio.py (refactored)
@@ -27,7 +27,7 @@
import codecs
import re
-from error import *
+from .error import *
__all__ = ['TableIo']
@@ -38,7 +38,7 @@
size = [len(str(x)) for x,t in self.header]
for row in self.table:
for i,c in enumerate(row):
- if type(c)==str or type(c)==unicode:
+ if type(c)==str or type(c)==str:
n = len(c)
else:
n = len(str(c))
@@ -52,7 +52,7 @@
s += "%%%ds|" % size[i] % str(c)
else:
s += "%%-%ds|" % size[i] % c
- print s[:-1]
+ print(s[:-1])
printRow([s for s,t in self.header])
for row in self.table:
@@ -73,7 +73,7 @@
f = writer(file(filename,'w'))
f.write("\t".join([a[0]+';'+a[1].__name__
for a in self.header]) + "\n")
- for item in self.metadata.items():
+ for item in list(self.metadata.items()):
f.write(";;MM %s\t%s\n" % item)
for row in self.table:
for c in row[:-1]:
@@ -81,7 +81,7 @@
f.write("\t")
else:
t = type(c)
- if t==str or t==unicode:
+ if t==str or t==str:
f.write(c+"\t")
else:
f.write(str(c)+"\t")
@@ -89,18 +89,18 @@
f.write("\n")
else:
t = type(row[-1])
- if t==str or t==unicode:
+ if t==str or t==str:
f.write(row[-1]+"\n")
else:
f.write(str(row[-1])+"\n")
- except IOError, e:
+ except IOError as e:
raise Error(ERR_TDF_EXPORT, str(e))
def importTdf(cls, filename):
_,_,reader,_ = codecs.lookup('utf-8')
try:
f = reader(file(filename))
- except IOError, e:
+ except IOError as e:
raise Error(ERR_TDF_IMPORT, e)
head = []
for h in f.readline().rstrip("\r\n").split("\t"):
@@ -125,10 +125,10 @@
try:
for i,cell in enumerate(l.rstrip("\n").split("\t")):
row.append(head[i][1](cell))
- except ValueError, e:
+ except ValueError as e:
raise Error(ERR_TDF_IMPORT,
"[%d:%d] %s" % (lno,i,str(e)))
- except IndexError, e:
+ except IndexError as e:
msg = "record has too many fields"
raise Error(ERR_TDF_IMPORT,
"[%d:%d] %s" % (lno,i,msg))
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableedit_qtable.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableedit_qtable.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableedit_qtable.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableedit_qtable.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/tableedit_qtable.py (refactored)
@@ -7,8 +7,8 @@
self.data = None
def setData(self, data):
- self.removeColumns(range(self.numCols()))
- self.removeRows(range(self.numRows()))
+ self.removeColumns(list(range(self.numCols())))
+ self.removeRows(list(range(self.numRows())))
self.setNumCols(len(data.header))
for j,(h,t) in enumerate(data.header):
@@ -17,7 +17,7 @@
for i,row in enumerate(data):
for j,h in enumerate(row):
if h is not None:
- if type(h)==str or type(h)==unicode:
+ if type(h)==str or type(h)==str:
self.setText(i,j,h)
else:
self.setText(i,j,str(h))
@@ -36,7 +36,7 @@
if val is None:
val = ''
self.disconnect(self,SIGNAL("valueChanged(int,int)"),self.__cellChanged)
- self.setText(i,j,unicode(val))
+ self.setText(i,j,str(val))
self.connect(self,SIGNAL("valueChanged(int,int)"),self.__cellChanged)
def _insertRow(self, i, row):
@@ -70,7 +70,7 @@
if __name__ == '__main__':
import qt
- from table_qt import TableModel
+ from .table_qt import TableModel
class Demo(qt.QVBox):
def __init__(self):
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/table_qt.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/table_qt.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/table_qt.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/table_qt.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/table_qt.py (refactored)
@@ -1,5 +1,5 @@
-import tableproxy
-from table import TableModel
+from . import tableproxy
+from .table import TableModel
__all__ = ['TableModel']
@@ -17,24 +17,24 @@
tab[1][2] = 3
tab.printTable()
- print
+ print()
tab.insertColumn(1,[("extra",int),10,9])
tab.printTable()
- print
+ print()
c = tab.takeColumn(1)
tab.insertColumn(3,c)
tab.printTable()
- print
+ print()
r = tab.takeRow(0)
tab.insertRow(1,r)
tab.printTable()
- print
+ print()
tab.sort(1,2)
tab.printTable()
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/table.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/table.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/table.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/table.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/table.py (refactored)
@@ -1,4 +1,4 @@
-from tableio import TableIo
+from .tableio import TableIo
import bisect
__all__ = ['TableModel']
@@ -177,7 +177,7 @@
"""
if type(col) != int:
col = self.str2col[col]
- return bisect.bisect_left(map(lambda x:x[col],self.table),val)
+ return bisect.bisect_left([x[col] for x in self.table],val)
def bisect_right(self, col, val):
"""
@@ -185,7 +185,7 @@
"""
if type(col) != int:
col = self.str2col[col]
- return bisect.bisect_right(map(lambda x:x[col],self.table),val)
+ return bisect.bisect_right([x[col] for x in self.table],val)
def setMetadata(self, nam, val):
if type(val) != str:
@@ -210,24 +210,24 @@
tab[1][2] = 3
tab.printTable()
- print
+ print()
tab.insertColumn(1,["extra",10,9])
tab.printTable()
- print
+ print()
c = tab.takeColumn(1)
tab.insertColumn(3,c)
tab.printTable()
- print
+ print()
r = tab.takeRow(0)
tab.insertRow(1,r)
tab.printTable()
- print
+ print()
tab.sort(2,3)
tab.printTable()
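Aside (illustrative sketch, not part of the build output above): in the table.py hunks, fix_map rewrites map(lambda x: x[col], ...) as a list comprehension: Python 3's map() returns a lazy iterator, while bisect needs an indexable sequence. A runnable example with hypothetical data:

import bisect

rows = [(1, 'a'), (3, 'b'), (7, 'c')]
col = 0
keys = [x[col] for x in rows]          # was: map(lambda x: x[col], rows)
print(bisect.bisect_left(keys, 3))     # -> 1, the insertion point for key 3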
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/myaccel.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/myaccel.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/myaccel.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/myaccel.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/myaccel.py (refactored)
@@ -69,7 +69,7 @@
"""
bindings = {}
- for keyseq,binding in keyBindings.items():
+ for keyseq,binding in list(keyBindings.items()):
seq = []
for subkeyseq in keyseq.split(','):
a = []
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/error.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: No changes to ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/error.py
RefactoringTool: Files that need to be modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/error.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/__init__.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/__init__.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/__init__.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/at_lite/__init__.py (refactored)
@@ -2,9 +2,9 @@
"""
"""
-from tree_qt import *
-from treeedit_qlistview import *
-from table_qt import *
-from tableedit_qtable import *
+from .tree_qt import *
+from .treeedit_qlistview import *
+from .table_qt import *
+from .tableedit_qtable import *
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lpath/__init__.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lpath/__init__.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lpath/__init__.py
--- ../python3/nltk_contrib/nltk_contrib/lpath/__init__.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lpath/__init__.py (refactored)
@@ -1 +1 @@
-from lpath import *
+from .lpath import *
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lambek/typedterm.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lambek/typedterm.py
--- ../python3/nltk_contrib/nltk_contrib/lambek/typedterm.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lambek/typedterm.py (refactored)
@@ -9,7 +9,7 @@
"""CG-style types"""
import types
-from term import *
+from .term import *
#####################################
# TYPEDTERM
@@ -26,14 +26,14 @@
self.type = type
def __repr__(self):
- return `self.term`+': '+`self.type`
+ return repr(self.term)+': '+repr(self.type)
def pp(self, pp_varmap=None):
- return self.term.pp(pp_varmap)+': '+`self.type`
+ return self.term.pp(pp_varmap)+': '+repr(self.type)
def to_latex(self, pp_varmap=None):
term = self.term.to_latex(pp_varmap)
- type = `self.type`
+ type = repr(self.type)
type = re.sub(r'\\', r'$\\backslash$', type)
type = re.sub(r'\*', r'$\\cdot$', type)
return term+': \\textrm{'+type+'}'
@@ -72,11 +72,11 @@
def __repr__(self):
if isinstance(self.result, RSlash) or \
isinstance(self.result, LSlash):
- right = '('+`self.result`+')'
- else: right = `self.result`
+ right = '('+repr(self.result)+')'
+ else: right = repr(self.result)
if isinstance(self.arg, RSlash):
- left = '('+`self.arg`+')'
- else: left = `self.arg`
+ left = '('+repr(self.arg)+')'
+ else: left = repr(self.arg)
return left + '\\' + right
def __cmp__(self, other):
if isinstance(other, LSlash) and self.arg == other.arg and \
@@ -95,15 +95,15 @@
raise TypeError('Expected Type arguments')
def __repr__(self):
if isinstance(self.result, RSlash):
- left = '('+`self.result`+')'
- else: left = `self.result`
- return left + '/' + `self.arg`
+ left = '('+repr(self.result)+')'
+ else: left = repr(self.result)
+ return left + '/' + repr(self.arg)
#return '('+`self.result`+'/'+`self.arg`+')'
if isinstance(self.arg, LSlash):
- return `self.result`+'/('+`self.arg`+')'
- else:
- return `self.result`+'/'+`self.arg`
+ return repr(self.result)+'/('+repr(self.arg)+')'
+ else:
+ return repr(self.result)+'/'+repr(self.arg)
def __cmp__(self, other):
if isinstance(other, RSlash) and self.arg == other.arg and \
self.result == other.result:
@@ -113,7 +113,7 @@
class BaseType(Type):
def __init__(self, name):
- if type(name) != types.StringType:
+ if type(name) != bytes:
raise TypeError("Expected a string name")
self.name = name
def __repr__(self):
@@ -131,7 +131,7 @@
if not isinstance(right, Type) or not isinstance(left, Type):
raise TypeError('Expected Type arguments')
def __repr__(self):
- return '('+`self.left`+'*'+`self.right`+')'
+ return '('+repr(self.left)+'*'+repr(self.right)+')'
def __cmp__(self, other):
if isinstance(other, Dot) and self.left == other.left and \
self.right == other.right:
@@ -205,7 +205,7 @@
else: i += 1
if len(segments) != 1:
- print 'Ouch!!', segments, ops
+ print('Ouch!!', segments, ops)
return segments[0]
@@ -219,16 +219,16 @@
vp = LSlash(np, s)
v2 = RSlash(vp, np)
AB = Dot(A, B)
- print v2
- print AB
- print LSlash(AB, v2)
- print Dot(v2, AB)
-
- print parse_type('A / B')
- print parse_type('A \\ B')
- print parse_type('A / B / C')
- print parse_type('A * B')
- print parse_type('A \\ B \\ C')
- print parse_type('A \\ (B / C)')
- print parse_type('(A / B) \\ C')
- print parse_type('(A / B) \\ C')
+ print(v2)
+ print(AB)
+ print(LSlash(AB, v2))
+ print(Dot(v2, AB))
+
+ print(parse_type('A / B'))
+ print(parse_type('A \\ B'))
+ print(parse_type('A / B / C'))
+ print(parse_type('A * B'))
+ print(parse_type('A \\ B \\ C'))
+ print(parse_type('A \\ (B / C)'))
+ print(parse_type('(A / B) \\ C'))
+ print(parse_type('(A / B) \\ C'))
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lambek/typedterm.py
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lambek/term.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lambek/term.py
--- ../python3/nltk_contrib/nltk_contrib/lambek/term.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lambek/term.py (refactored)
@@ -25,7 +25,7 @@
Var._max_id += 1
self.id = Var._max_id
def __repr__(self):
- return '?' + `self.id`
+ return '?' + repr(self.id)
def pp(self, pp_varmap=None):
if pp_varmap == None: pp_varmap = make_pp_varmap(self)
return pp_varmap[self]
@@ -40,7 +40,7 @@
class Const(Term):
def __init__(self, name):
- if type(name) != types.StringType:
+ if type(name) != bytes:
raise TypeError("Expected a string name")
self.name = name
def __repr__(self):
@@ -64,9 +64,9 @@
def __repr__(self):
if isinstance(self.func, Appl) or \
isinstance(self.func, Abstr):
- return '('+`self.func` + ')(' + `self.arg` + ')'
- else:
- return `self.func` + '(' + `self.arg` + ')'
+ return '('+repr(self.func) + ')(' + repr(self.arg) + ')'
+ else:
+ return repr(self.func) + '(' + repr(self.arg) + ')'
def pp(self, pp_varmap=None):
if pp_varmap == None: pp_varmap = make_pp_varmap(self)
if isinstance(self.func, Appl) or \
@@ -101,9 +101,9 @@
def __repr__(self):
if isinstance(self.body, Abstr) or \
isinstance(self.body, Appl):
- return '(\\' + `self.var` + '.' + `self.body`+')'
- else:
- return '\\' + `self.var` + '.' + `self.body`
+ return '(\\' + repr(self.var) + '.' + repr(self.body)+')'
+ else:
+ return '\\' + repr(self.var) + '.' + repr(self.body)
def pp(self, pp_varmap=None):
if pp_varmap == None: pp_varmap = make_pp_varmap(self)
if isinstance(self.body, Abstr) or \
@@ -136,7 +136,7 @@
not isinstance(self.right, Term):
raise TypeError('Expected Term arguments')
def __repr__(self):
- return '<'+`self.left`+', '+`self.right`+'>'
+ return '<'+repr(self.left)+', '+repr(self.right)+'>'
def pp(self, pp_varmap=None):
if pp_varmap == None: pp_varmap = make_pp_varmap(self)
return '<'+self.left.pp(pp_varmap)+', '+\
@@ -160,20 +160,20 @@
# Get the remaining names.
freenames = [n for n in Term.FREEVAR_NAME \
- if n not in pp_varmap.values()]
+ if n not in list(pp_varmap.values())]
boundnames = Term.BOUNDVAR_NAME[:]
for fv in free:
- if not pp_varmap.has_key(fv):
+ if fv not in pp_varmap:
if freenames == []:
- pp_varmap[fv] = `fv`
+ pp_varmap[fv] = repr(fv)
else:
pp_varmap[fv] = freenames.pop()
for bv in bound:
- if not pp_varmap.has_key(bv):
+ if bv not in pp_varmap:
if boundnames == []:
- pp_varmap[bv] = `bv`
+ pp_varmap[bv] = repr(bv)
else:
pp_varmap[bv] = boundnames.pop()
@@ -183,7 +183,7 @@
def __init__(self):
self._map = {}
def add(self, var, term):
- if self._map.has_key(var):
+ if var in self._map:
if term != None and term != self._map[var]:
# Unclear what I should do here -- for now, just pray
# for the best. :)
@@ -191,7 +191,7 @@
else:
self._map[var] = term
def __repr__(self):
- return `self._map`
+ return repr(self._map)
def _get(self, var, orig, getNone=1):
val = self._map[var]
if not getNone and val == None: return var
@@ -201,17 +201,17 @@
# Break the loop at an arbitrary point.
del(self._map[val])
return val
- elif self._map.has_key(val):
+ elif val in self._map:
return(self._get(val, orig, getNone))
else:
return val
def __getitem__(self, var):
- if self._map.has_key(var):
+ if var in self._map:
return self._get(var, var, 1)
else:
return var
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lambek/term.py
def simplify(self, var):
- if self._map.has_key(var):
+ if var in self._map:
return self._get(var, var, 0)
else:
return var
@@ -221,7 +221,7 @@
return result
def __add__(self, other):
result = self.copy()
- for var in other._map.keys():
+ for var in list(other._map.keys()):
result.add(var, other[var])
return result
def copy_from(self, other):
@@ -251,7 +251,7 @@
_VERBOSE = 0
def unify(term1, term2, varmap=None, depth=0):
- if _VERBOSE: print ' '*depth+'>> unify', term1, term2, varmap
+ if _VERBOSE: print(' '*depth+'>> unify', term1, term2, varmap)
term1 = reduce(term1)
term2 = reduce(term2)
if varmap == None: varmap = VarMap()
@@ -260,18 +260,18 @@
result = unify_oneway(term1, term2, varmap, depth+1)
if result:
if _VERBOSE:
- print ' '*depth+'<', result
+ print(' '*depth+'<', result)
return result
varmap.copy_from(old_varmap)
result = unify_oneway(term2, term1, varmap, depth+1)
if result:
if _VERBOSE:
- print ' '*depth+'<', result
+ print(' '*depth+'<', result)
return result
#raise(ValueError("can't unify", term1, term2, varmap))
if _VERBOSE:
- print ' '*depth+'unify', term1, term2, varmap, '=>', None
+ print(' '*depth+'unify', term1, term2, varmap, '=>', None)
return None
@@ -514,7 +514,7 @@
var = re.match(r'\?(.*)', str)
if var:
varname = var.groups()[0]
- if varmap.has_key(varname):
+ if varname in varmap:
return varmap[varname]
else:
var = Var()
@@ -535,22 +535,22 @@
f3 = Abstr(x, Appl(c, x))
f4 = Abstr(y, Appl(c, y))
- print f1, '=>', reduce(f1)
- print f2, '=>', reduce(f2)
- print f3, '=>', reduce(f3)
-
- print f1.pp()
- print f2.pp()
- print f3.pp()
-
- print
- print unify(x, y)
- print unify(x, c)
- print unify(x, f1)
- print unify(f3, f4)
- print unify(Abstr(x,Appl(x,x)), Abstr(y,Appl(y,y)))
-
- print parse_term('<(\?var.(?var))(?other_var),?x>').pp()
+ print(f1, '=>', reduce(f1))
+ print(f2, '=>', reduce(f2))
+ print(f3, '=>', reduce(f3))
+
+ print(f1.pp())
+ print(f2.pp())
+ print(f3.pp())
+
+ print()
+ print(unify(x, y))
+ print(unify(x, c))
+ print(unify(x, f1))
+ print(unify(f3, f4))
+ print(unify(Abstr(x,Appl(x,x)), Abstr(y,Appl(y,y))))
+
+ print(parse_term('<(\?var.(?var))(?other_var),?x>').pp())
reduce(parse_term(''))
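Note on the __add__ hunk above: 2to3 wraps other._map.keys() in list() because Python 3's dict.keys() returns a live view, and mutating a dict while iterating its view raises RuntimeError. In __add__ the mutated dict (result._map) differs from the iterated one, so the snapshot is not strictly needed here; the tool simply rewrites conservatively. A small sketch of the failure mode the list() copy avoids (the dict d is hypothetical, not from the source):

    d = {'a': 1}
    for k in list(d.keys()):  # snapshot of the keys; safe to mutate d below
        d[k * 2] = d[k]       # iterating the live view here would raise
                              # RuntimeError: dictionary changed size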
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lambek/lexicon.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lambek/lexicon.py
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lambek/lexicon.py
--- ../python3/nltk_contrib/nltk_contrib/lambek/lexicon.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lambek/lexicon.py (refactored)
@@ -13,8 +13,8 @@
"""
-from term import *
-from typedterm import *
+from .term import *
+from .typedterm import *
# Map from word to TypedTerm
class Lexicon:
@@ -29,16 +29,16 @@
(word, term, type) = line.split(':')
te = TypedTerm(parse_term(term), parse_type(type))
except ValueError:
- print 'Bad line:', line
+ print('Bad line:', line)
continue
word = word.strip().lower()
- if self._map.has_key(word):
- print 'Duplicate definitions for', word
+ if word in self._map:
+ print('Duplicate definitions for', word)
self._map[word] = te
def words(self):
- return self._map.keys()
+ return list(self._map.keys())
def __getitem__(self, word):
word = word.strip().lower()
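Note on the import hunk above: rewriting "from term import *" to "from .term import *" makes lexicon.py importable only as a package module; invoking it directly as a script will no longer work. A hedged usage sketch (the package path is assumed from the file paths in this log):

    # Works: relative imports resolve inside the package.
    from nltk_contrib.lambek import lexicon

    # Fails after the rewrite:
    #   $ python lexicon.py
    #   ImportError: attempted relative import with no known parent package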
+ for i in '$(find ../python3 -type f -name '\''*.py'\'' |grep -v '\''Tables\.py'\'')'
+ 2to3 -w -n ../python3/nltk_contrib/nltk_contrib/lambek/lambek.py
RefactoringTool: Skipping optional fixer: buffer
RefactoringTool: Skipping optional fixer: idioms
RefactoringTool: Skipping optional fixer: set_literal
RefactoringTool: Skipping optional fixer: ws_comma
RefactoringTool: Refactored ../python3/nltk_contrib/nltk_contrib/lambek/lambek.py
--- ../python3/nltk_contrib/nltk_contrib/lambek/lambek.py (original)
+++ ../python3/nltk_contrib/nltk_contrib/lambek/lambek.py (refactored)
@@ -18,9 +18,9 @@
_VAR_NAMES = 1
_SHOW_VARMAP = not _VAR_NAMES
-from term import *
-from typedterm import *
-from lexicon import *
+from .term import *
+from .typedterm import *
+from .lexicon import *
import sys, re
class Sequent:
@@ -30,8 +30,8 @@
def __init__(self, left, right):
# Check types, because we're paranoid.
- if type(left) not in [types.ListType, types.TupleType] or \
- type(right) not in [types.ListType, types.TupleType]:
+ if type(left) not in [list, tuple] or \
+ type(right) not in [list, tuple]:
raise TypeError('Expected lists of TypedTerms')
for elt in left+right:
if not isinstance(elt, TypedTerm):
@@ -41,8 +41,8 @@
self.right = right
def __repr__(self):
- left_str = `self.left`[1:-1]
- right_str = `self.right`[1:-1]
+ left_str = repr(self.left)[1:-1]
+ right_str = repr(self.right)[1:-1]
return left_str + ' => ' + right_str
def to_latex(self, pp_varmap=None):
@@ -86,8 +86,8 @@
self.varmap = varmap
def __repr__(self):
- return self.rule+' '+`self.assumptions`+' -> '\
- +`self.conclusion`
+ return self.rule+' '+repr(self.assumptions)+' -> '\
+ +repr(self.conclusion)
def simplify(self, varmap=None):
if varmap == None:
@@ -157,7 +157,7 @@
if _VAR_NAMES:
concl = self.conclusion.pp(pp_varmap)
else:
- concl = `self.conclusion`
+ concl = repr(self.conclusion)
# Draw assumptions
for assumption in self.assumptions:
@@ -175,7 +175,7 @@
if toplevel:
if _SHOW_VARMAP:
- return str+'\nVarmap: '+ `self.varmap`+'\n'
+ return str+'\nVarmap: '+ repr(self.varmap)+'\n'
else:
return str
else:
@@ -225,7 +225,7 @@
def _prove(sequent, varmap, short_circuit, depth):
if _VERBOSE:
- print (' '*depth)+'Trying to prove', sequent
+ print((' '*depth)+'Trying to prove', sequent)
proofs = []
@@ -245,7 +245,7 @@
proofs = proofs + dot_r(sequent, varmap, short_circuit, depth+1)
if _VERBOSE:
- print ' '*depth+'Found '+`len(proofs)`+' proof(s)'
+ print(' '*depth+'Found '+repr(len(proofs))+' proof(s)')
return proofs
@@ -506,14 +506,14 @@
sq = Sequent(left, right)
proofs = prove(sq, short_circuit)
if proofs:
- print '#'*60
- print "## Proof(s) for", sq.pp()
+ print('#'*60)
+ print("## Proof(s) for", sq.pp())
for proof in proofs:
- print
- print proof.to_latex()
+ print()
+ print(proof.to_latex())
else:
- print '#'*60
- print "## Can't prove", sq.pp()
+ print('#'*60)
+ print("## Can't prove", sq.pp())
def test_lambek():
lex = Lexicon()
@@ -573,70 +573,70 @@
if str.lower().endswith('off'): latexmode = 0
elif str.lower().endswith('on'): latexmode = 1
else: latexmode = not latexmode
- if latexmode: print >>out, '% latexmode on'
- else: print >>out, 'latexmode off'
+ if latexmode: print('% latexmode on', file=out)
+ else: print('latexmode off', file=out)
elif str.lower().startswith('short'):
if str.lower().endswith('off'): shortcircuit = 0
elif str.lower().endswith('on'): shortcircuit = 1
else: shortcircuit = not shortcircuit
- if shortcircuit: print >>out, '%shortcircuit on'
- else: print >>out, '% shortcircuit off'
+ if shortcircuit: print('%shortcircuit on', file=out)
+ else: print('% shortcircuit off', file=out)
elif str.lower().startswith('lex'):
words = lex.words()
RefactoringTool: Files that were modified:
RefactoringTool: ../python3/nltk_contrib/nltk_contrib/lambek/lambek.py
- print >>out, '% Lexicon: '
+ print('% Lexicon: ', file=out)
for word in words:
- print >>out, '% ' + word + ':', \
- ' '*(14-len(word)) + lex[word].pp()
+ print('% ' + word + ':', \
+ ' '*(14-len(word)) + lex[word].pp(), file=out)
elif str.lower().startswith('q'): return
elif str.lower().startswith('x'): return
else:
- print >>out, HELP
+ print(HELP, file=out)
else:
try:
(left, right) = str.split('=>')
seq = Sequent(lex.parse(left), lex.parse(right))
proofs = prove(seq, shortcircuit)
- print >>out
- print >>out, '%'*60
+ print(file=out)
+ print('%'*60, file=out)
if proofs:
- print >>out, "%% Proof(s) for", seq.pp()
+ print("%% Proof(s) for", seq.pp(), file=out)
for proof in proofs:
- print >>out
- if latexmode: print >>out, proof.to_latex()
- else: print >>out, proof.pp()
+ print(file=out)
+ if latexmode: print(proof.to_latex(), file=out)
+ else: print(proof.pp(), file=out)
else:
- print >>out, "%% Can't prove", seq.pp()
- except KeyError, e:
- print 'Mal-formatted sequent'
- print 'Key error (unknown lexicon entry?)'
- print e
- except ValueError, e:
- print 'Mal-formatted sequent'
- print e
+ print("%% Can't prove", seq.pp(), file=out)
+ except KeyError as e:
+ print('Mal-formatted sequent')
+ print('Key error (unknown lexicon entry?)')
+ print(e)
+ except ValueError as e:
+ print('Mal-formatted sequent')
+ print(e)
# Usage: argv[0] lexiconfile
def main(argv):
if (len(argv) != 2) and (len(argv) != 4):
- print 'Usage:', argv[0], ''
- print 'Usage:', argv[0], '