<86>Apr 21 03:39:15 userdel[45789]: delete user 'rooter' <86>Apr 21 03:39:15 groupadd[45869]: group added to /etc/group: name=rooter, GID=621 <86>Apr 21 03:39:15 groupadd[45869]: group added to /etc/gshadow: name=rooter <86>Apr 21 03:39:15 groupadd[45869]: new group: name=rooter, GID=621 <86>Apr 21 03:39:15 useradd[45899]: new user: name=rooter, UID=621, GID=621, home=/root, shell=/bin/bash <86>Apr 21 03:39:15 userdel[45919]: delete user 'builder' <86>Apr 21 03:39:15 userdel[45919]: removed group 'builder' owned by 'builder' <86>Apr 21 03:39:15 userdel[45919]: removed shadow group 'builder' owned by 'builder' <86>Apr 21 03:39:15 groupadd[45947]: group added to /etc/group: name=builder, GID=622 <86>Apr 21 03:39:15 groupadd[45947]: group added to /etc/gshadow: name=builder <86>Apr 21 03:39:15 groupadd[45947]: new group: name=builder, GID=622 <86>Apr 21 03:39:16 useradd[45970]: new user: name=builder, UID=622, GID=622, home=/usr/src, shell=/bin/bash <13>Apr 21 03:39:19 rpmi: perl-Encode-2.83-alt1 1459157776 installed <13>Apr 21 03:39:19 rpmi: libexpat-2.2.4-alt0.M80P.1 1503871120 installed <13>Apr 21 03:39:19 rpmi: ca-certificates-2016.02.25-alt1 1462368370 installed <13>Apr 21 03:39:19 rpmi: libcrypto10-1.0.2n-alt0.M80P.1 1512766129 installed <13>Apr 21 03:39:19 rpmi: perl-HTTP-Date-6.02-alt1 1348645274 installed <13>Apr 21 03:39:19 rpmi: libpng15-1.5.28-alt1 1484572014 installed <13>Apr 21 03:39:19 rpmi: libwayland-client-1.14.0-alt0.M80P.1 1502720500 installed <13>Apr 21 03:39:19 rpmi: perl-XML-NamespaceSupport-1.11-alt3 1322003535 installed <13>Apr 21 03:39:19 rpmi: libjpeg-2:1.3.1-alt0.1 1388074033 installed <13>Apr 21 03:39:19 rpmi: libsqlite3-3.15.2-alt1 1480432212 installed <13>Apr 21 03:39:19 rpmi: libtasn1-4.9-alt1 1469555628 installed <13>Apr 21 03:39:19 rpmi: libidn2-2.0.4-alt3.M80P.1 1520307194 installed <13>Apr 21 03:39:19 rpmi: libopenblas-0.2.14-alt1.git20150324 1433158855 installed <13>Apr 21 03:39:19 rpmi: liblcms2-2.8-alt1 1471601528 installed <13>Apr 21 03:39:19 rpmi: libwayland-server-1.14.0-alt0.M80P.1 1502720500 installed <13>Apr 21 03:39:19 rpmi: perl-LWP-MediaTypes-6.02-alt1 1329754558 installed <13>Apr 21 03:39:19 rpmi: perl-Compress-Raw-Zlib-2.069-alt1.1 1448470036 installed <13>Apr 21 03:39:19 rpmi: perl-libnet-1:3.08-alt1 1458419902 installed <13>Apr 21 03:39:19 rpmi: perl-URI-1.71-alt1 1455181348 installed <13>Apr 21 03:39:19 rpmi: perl-XML-SAX-Base-1.08-alt1 1317871344 installed <13>Apr 21 03:39:19 rpmi: libnettle6-3.4.1-alt1 p8.217493.100 1544204548 installed <13>Apr 21 03:39:19 rpmi: libgdbm-1.8.3-alt10 1454943313 installed <13>Apr 21 03:39:19 rpmi: libhogweed4-3.4.1-alt1 p8.217493.100 1544204548 installed <13>Apr 21 03:39:19 rpmi: perl-WWW-RobotRules-6.02-alt1 1329756211 installed <13>Apr 21 03:39:19 rpmi: libjasper-1.900.1-alt3 1391718482 installed <13>Apr 21 03:39:19 rpmi: libtiff5-4.0.3-alt1 1348347498 installed <13>Apr 21 03:39:19 rpmi: libwayland-cursor-1.14.0-alt0.M80P.1 1502720500 installed <13>Apr 21 03:39:19 rpmi: perl-File-Listing-6.04-alt1 1329758996 installed <13>Apr 21 03:39:19 rpmi: perl-Encode-Locale-1.05-alt1 1444608613 installed <13>Apr 21 03:39:19 rpmi: perl-IO-HTML-1.001-alt1 1404821752 installed <13>Apr 21 03:39:19 rpmi: python-module-z3c-3.0.0-alt2.a2.dev0.git20130313.1.1.1 1460417103 installed <13>Apr 21 03:39:19 rpmi: python-module-pycares-0.6.3-alt1.1.1 1459598195 installed <13>Apr 21 03:39:19 rpmi: pytz-zoneinfo-1:2015.4-alt1.1.1 1460413776 installed <13>Apr 21 03:39:19 rpmi: python-module-certifi-2015.04.28-alt1.1 1457893771 
installed <13>Apr 21 03:39:19 rpmi: libnumpy-1:1.12.1-alt0.M80P.1 1496160663 installed <13>Apr 21 03:39:19 rpmi: libxblas-1.0.248-alt1 1322010716 installed <13>Apr 21 03:39:19 rpmi: libxkbcommon-0.7.2-alt0.M80P.1 1503524837 installed <13>Apr 21 03:39:19 rpmi: libgudev-1:231-alt0.M80P.1 1487052830 installed <13>Apr 21 03:39:19 rpmi: udev-rules-1:239-alt1.M80P.1 p8+226956.200.2.1 1555431539 installed <13>Apr 21 03:39:19 rpmi: libquadmath0-5.3.1-alt3.M80P.1 p8+225520.100.3.1 1553688800 installed <13>Apr 21 03:39:20 rpmi: libgfortran3-5.3.1-alt3.M80P.1 p8+225520.100.3.1 1553688800 installed <13>Apr 21 03:39:20 rpmi: liblapack-1:3.5.0-alt1 1401382194 installed <13>Apr 21 03:39:20 rpmi: libepoxy-1.4.3-alt0.M80P.1 1498727071 installed <13>Apr 21 03:39:20 rpmi: libdatrie-0.2.8-alt1_5 1410185969 installed <13>Apr 21 03:39:20 rpmi: libthai-0.1.24-alt1_1 1459282110 installed <13>Apr 21 03:39:20 rpmi: publicsuffix-list-dafsa-20190329-alt1 p8+226349.100.1.1 1554128046 installed <13>Apr 21 03:39:20 rpmi: libpsl-0.18.0-alt0.M80P.1 1502785107 installed <13>Apr 21 03:39:20 rpmi: libnghttp2-1.8.0-alt1 1456818805 installed <13>Apr 21 03:39:20 rpmi: libusb-1.0.21-alt0.M80P.1 1485147102 installed <13>Apr 21 03:39:20 rpmi: libpixman-3:0.32.8-alt1 1447610691 installed <13>Apr 21 03:39:20 rpmi: libbrotlicommon0-1.0.4-alt0.M80P.1 1528205024 installed <13>Apr 21 03:39:20 rpmi: libbrotlidec0-1.0.4-alt0.M80P.1 1528205024 installed <13>Apr 21 03:39:20 rpmi: libgraphite2-1.3.10-alt0.M80P.1 1496411360 installed <13>Apr 21 03:39:20 rpmi: libxshmfence-1.2-alt1 1420972191 installed <13>Apr 21 03:39:20 rpmi: libpciaccess-1:0.13.4-alt1 1431681688 installed <13>Apr 21 03:39:20 rpmi: libdrm-1:2.4.89-alt1.M80P.1 1516609380 installed <13>Apr 21 03:39:20 rpmi: libgbm-4:18.0.5-alt1.M80P.1 p8+226404.100.1.1 1554206332 installed <13>Apr 21 03:39:20 rpmi: perl-IO-Socket-IP-0.37-alt1 1444921335 installed <13>Apr 21 03:39:20 rpmi: perl-Compress-Raw-Bzip2-2.069-alt1.1 1448470068 installed <13>Apr 21 03:39:20 rpmi: perl-IO-Compress-2.069-alt1 1444618007 installed <13>Apr 21 03:39:20 rpmi: perl-HTTP-Message-6.11-alt1 1444616672 installed <13>Apr 21 03:39:20 rpmi: perl-HTTP-Cookies-6.01-alt1 1329759964 installed <13>Apr 21 03:39:20 rpmi: perl-HTTP-Negotiate-6.01-alt1 1329760563 installed <13>Apr 21 03:39:20 rpmi: perl-Net-HTTP-6.09-alt1 1432310023 installed <13>Apr 21 03:39:20 rpmi: perl-HTML-Tagset-3.20-alt2 1317725093 installed <13>Apr 21 03:39:20 rpmi: perl-HTML-Parser-3.72-alt1 1455170565 installed <13>Apr 21 03:39:20 rpmi: perl-libwww-6.15-alt1 1449505512 installed <13>Apr 21 03:39:20 rpmi: libgdk-pixbuf-locales-2.36.11-alt0.M80P.1 1507198237 installed <13>Apr 21 03:39:20 rpmi: gtk+3-themes-incompatible-3.20-alt3 1461944560 installed <13>Apr 21 03:39:20 rpmi: libproxy-0.4.14-alt0.M80P.1 1491211278 installed <13>Apr 21 03:39:20 rpmi: libnspr-1:4.20-alt1 p8.216526.40 1542382588 installed <13>Apr 21 03:39:20 rpmi: libnss-3.40.0-alt0.M80P.1 p8.216526.44 1542814585 installed <13>Apr 21 03:39:20 rpmi: libatk-locales-2.24.0-alt0.M80P.1 1504535386 installed <13>Apr 21 03:39:20 rpmi: libatk-2.24.0-alt0.M80P.1 1504535253 installed <13>Apr 21 03:39:20 rpmi: rpm-build-xdg-0.2-alt1 1250461503 installed <13>Apr 21 03:39:20 rpmi: shared-mime-info-1.9-alt0.M80P.1 1506021412 installed <13>Apr 21 03:39:20 rpmi: gsettings-desktop-schemas-data-3.24.1-alt0.M80P.1 1504768054 installed <13>Apr 21 03:39:20 rpmi: liblz4-1:1.7.5-alt1 1488766699 installed <13>Apr 21 03:39:20 rpmi: libgpg-error-1.31-alt1.M80P.1 1529024730 installed <13>Apr 21 03:39:20 rpmi: 
libgcrypt20-1.8.3-alt4 p8+219793.200.6.1 1551112986 installed <13>Apr 21 03:39:20 rpmi: libsystemd-1:239-alt1.M80P.1 p8+226956.200.2.1 1555431492 installed <13>Apr 21 03:39:20 rpmi: libdbus-1.10.24-alt4.M80P.1 1518773282 installed <13>Apr 21 03:39:20 rpmi: libavahi-0.6.31-alt7.M80P.1 1526318593 installed <13>Apr 21 03:39:20 rpmi: libxslt-1.1.28-alt4 1448210404 installed <13>Apr 21 03:39:20 rpmi: libX11-locales-3:1.6.3-alt1 1431956885 installed <13>Apr 21 03:39:20 rpmi: libXdmcp-1.1.1-alt1 1334617699 installed <13>Apr 21 03:39:20 rpmi: libXau-1.0.8-alt1 1369565807 installed <13>Apr 21 03:39:20 rpmi: libxcb-1.12-alt2 p8.218219.300 1545313310 installed <13>Apr 21 03:39:20 rpmi: libX11-3:1.6.3-alt1 1431956911 installed <13>Apr 21 03:39:20 rpmi: libXext-1.3.3-alt1 1409902932 installed <13>Apr 21 03:39:20 rpmi: libXrender-0.9.8-alt1 1371312110 installed <13>Apr 21 03:39:20 rpmi: libXfixes-5.0.1-alt1 1369809606 installed <13>Apr 21 03:39:20 rpmi: libat-spi2-core-2.24.1-alt0.M80P.1 1504536015 installed <13>Apr 21 03:39:20 rpmi: libXdamage-1.1.3-alt4 1297162596 installed <13>Apr 21 03:39:20 rpmi: libXcursor-1.1.15-alt1.M80P.1 1512373713 installed <13>Apr 21 03:39:20 rpmi: libXrandr-1.5.0-alt1 1431936188 installed <13>Apr 21 03:39:20 rpmi: libXtst-1.2.2-alt1 1369984880 installed <13>Apr 21 03:39:20 rpmi: libXxf86vm-1.1.3-alt1 1369984824 installed <13>Apr 21 03:39:21 rpmi: libGL-4:18.0.5-alt1.M80P.1 p8+226404.100.1.1 1554206332 installed <13>Apr 21 03:39:21 rpmi: libEGL-4:18.0.5-alt1.M80P.1 p8+226404.100.1.1 1554206332 installed <13>Apr 21 03:39:21 rpmi: libwayland-egl-4:18.0.5-alt1.M80P.1 p8+226404.100.1.1 1554206332 installed <13>Apr 21 03:39:21 rpmi: libXi-1.7.9-alt1.M80P.1 1515766146 installed <13>Apr 21 03:39:21 rpmi: libXinerama-1.1.3-alt1 1369984571 installed <13>Apr 21 03:39:21 rpmi: libXcomposite-0.4.3-alt3 1297306939 installed <13>Apr 21 03:39:21 rpmi: libtinfo-devel-5.9-alt8 1456756459 installed <13>Apr 21 03:39:21 rpmi: libncurses-devel-5.9-alt8 1456756459 installed <13>Apr 21 03:39:21 rpmi: python-modules-curses-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:21 rpmi: libverto-0.2.6-alt1_6 1455633234 installed <13>Apr 21 03:39:21 rpmi: libkeyutils-1.5.10-alt0.M80P.2 p8+216694.100.6.1 1547827915 installed <13>Apr 21 03:39:21 rpmi: libcom_err-1.42.13-alt2 1449075846 installed <13>Apr 21 03:39:21 rpmi: libgio-2.52.3-alt0.M80P.1 1504533271 installed <13>Apr 21 03:39:21 rpmi: gobject-introspection-1.52.1-alt0.M80P.1 1504533480 installed <13>Apr 21 03:39:21 rpmi: libssl10-1.0.2n-alt0.M80P.1 1512766129 installed <86>Apr 21 03:39:21 groupadd[56514]: group added to /etc/group: name=_keytab, GID=499 <86>Apr 21 03:39:21 groupadd[56514]: group added to /etc/gshadow: name=_keytab <86>Apr 21 03:39:21 groupadd[56514]: new group: name=_keytab, GID=499 <13>Apr 21 03:39:21 rpmi: libkrb5-1.14.6-alt1.M80P.1 1525355673 installed <13>Apr 21 03:39:21 rpmi: libgdk-pixbuf-2.36.11-alt0.M80P.1 1507198160 installed <13>Apr 21 03:39:21 rpmi: libp11-kit-0.23.8-alt1.M80P.1 1504877563 installed <86>Apr 21 03:39:21 groupadd[56695]: group added to /etc/group: name=messagebus, GID=498 <86>Apr 21 03:39:21 groupadd[56695]: group added to /etc/gshadow: name=messagebus <86>Apr 21 03:39:21 groupadd[56695]: new group: name=messagebus, GID=498 <86>Apr 21 03:39:21 useradd[56709]: new user: name=messagebus, UID=499, GID=498, home=/run/dbus, shell=/dev/null <13>Apr 21 03:39:21 rpmi: dbus-1.10.24-alt4.M80P.1 1518773282 installed <13>Apr 21 03:39:21 rpmi: gobject-introspection-x11-1.52.1-alt0.M80P.1 1504533480 installed 
<13>Apr 21 03:39:21 rpmi: gsettings-desktop-schemas-3.24.1-alt0.M80P.1 1504768052 installed <13>Apr 21 03:39:21 rpmi: libgusb-0.2.11-alt0.M80P.1 1503526571 installed <13>Apr 21 03:39:21 rpmi: libcolord-1.3.5-alt0.M80P.1 1488875506 installed <13>Apr 21 03:39:21 rpmi: libharfbuzz-1.6.3-alt0.M80P.1 1509918814 installed <13>Apr 21 03:39:21 rpmi: libfreetype-2.8-alt0.M80P.3 1505462817 installed <13>Apr 21 03:39:21 rpmi: fontconfig-2.12.6-alt1.M80P.1 1506008910 installed Updating fonts cache: <29>Apr 21 03:39:22 fontconfig: Updating fonts cache: succeeded [ DONE ] <13>Apr 21 03:39:22 rpmi: libcairo-1:1.14.4-alt1 1447005495 installed <13>Apr 21 03:39:22 rpmi: libcairo-gobject-1:1.14.4-alt1 1447005495 installed <13>Apr 21 03:39:22 rpmi: perl-XML-SAX-0.99-alt2 1384805188 installed <13>Apr 21 03:39:22 rpmi: perl-XML-Simple-2.22-alt1 1449506808 installed <13>Apr 21 03:39:22 rpmi: icon-naming-utils-0.8.90-alt1 1236573102 installed <13>Apr 21 03:39:23 rpmi: icon-theme-adwaita-3.24.0-alt0.M80P.1 1504567873 installed <13>Apr 21 03:39:23 rpmi: libXft-2.3.2-alt1 1409902650 installed <13>Apr 21 03:39:23 rpmi: libpango-1.40.14-alt0.M80P.1 1510824230 installed <13>Apr 21 03:39:23 rpmi: libpango-gir-1.40.14-alt0.M80P.1 1510824230 installed <13>Apr 21 03:39:23 rpmi: dbus-tools-gui-1.10.24-alt4.M80P.1 1518773282 installed <13>Apr 21 03:39:23 rpmi: at-spi2-core-2.24.1-alt0.M80P.1 1504536015 installed <13>Apr 21 03:39:23 rpmi: at-spi2-atk-2.24.1-alt0.M80P.1 1504536077 installed <13>Apr 21 03:39:24 rpmi: libgnutls30-3.6.7-alt0.M80P.1 p8+225835.100.2.1 1554226496 installed <13>Apr 21 03:39:24 rpmi: glib-networking-2.50.0-alt1.M80P.1 1503575484 installed <13>Apr 21 03:39:24 rpmi: libsoup-2.62.2-alt0.M80P.1 1528058397 installed <13>Apr 21 03:39:24 rpmi: libsoup-gnome-2.62.2-alt0.M80P.1 1528058397 installed <13>Apr 21 03:39:24 rpmi: librest-0.8.1-alt0.M80P.1 1509920808 installed <13>Apr 21 03:39:24 rpmi: gtk-update-icon-cache-2.24.31-alt1 1473461609 installed <13>Apr 21 03:39:24 rpmi: libgdk-pixbuf-gir-2.36.11-alt0.M80P.1 1507198160 installed <13>Apr 21 03:39:24 rpmi: libcups-2.1.0-alt2.M80P.4 p8+223280.100.1.1 1551372192 installed <86>Apr 21 03:39:24 groupadd[60699]: group added to /etc/group: name=sasl, GID=497 <86>Apr 21 03:39:24 groupadd[60699]: group added to /etc/gshadow: name=sasl <86>Apr 21 03:39:24 groupadd[60699]: new group: name=sasl, GID=497 <13>Apr 21 03:39:24 rpmi: libsasl2-3-2.1.26-alt7 1479477445 installed <13>Apr 21 03:39:24 rpmi: libldap-2.4.45-alt1.M80P.1 1513980376 installed <13>Apr 21 03:39:25 rpmi: libcurl-7.64.0-alt1 p8+220788.100.1.1 1549495393 installed <13>Apr 21 03:39:25 rpmi: libatk-gir-2.24.0-alt0.M80P.1 1504535253 installed <13>Apr 21 03:39:25 rpmi: libdconf-0.26.1-alt0.M80P.1 1507197872 installed <13>Apr 21 03:39:25 rpmi: dconf-0.26.1-alt0.M80P.1 1507197872 installed <13>Apr 21 03:39:25 rpmi: libgtk+3-schemas-3.22.26-alt0.M80P.1 1510825424 installed <13>Apr 21 03:39:25 rpmi: libpolkit-0.113-alt2 1469643478 installed <86>Apr 21 03:39:25 groupadd[60933]: group added to /etc/group: name=colord, GID=496 <86>Apr 21 03:39:25 groupadd[60933]: group added to /etc/gshadow: name=colord <86>Apr 21 03:39:25 groupadd[60933]: new group: name=colord, GID=496 <86>Apr 21 03:39:25 useradd[60967]: new user: name=colord, UID=498, GID=496, home=/var/colord, shell=/dev/null <13>Apr 21 03:39:25 rpmi: colord-1.3.5-alt0.M80P.1 1488875506 installed <13>Apr 21 03:39:25 rpmi: libjson-glib-1.2.8-alt0.M80P.1 1489956779 installed <13>Apr 21 03:39:27 rpmi: libgtk+3-3.22.26-alt0.M80P.1 1510825302 installed <13>Apr 21 
03:39:27 rpmi: libgtk+3-gir-3.22.26-alt0.M80P.1 1510825302 installed <13>Apr 21 03:39:27 rpmi: perl-XML-LibXML-2.0124-alt1 1458750523 installed <13>Apr 21 03:39:27 rpmi: libp11-kit-trust-0.23.8-alt1.M80P.1 1504877563 installed <13>Apr 21 03:39:27 rpmi: dbus-tools-1.10.24-alt4.M80P.1 1518773282 installed <13>Apr 21 03:39:27 rpmi: python-modules-compiler-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:28 rpmi: python3-base-3.5.4-alt2.M80P.1 1527753911 installed <13>Apr 21 03:39:28 rpmi: python-modules-email-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:28 rpmi: python3-3.5.4-alt2.M80P.1 1527753911 installed <13>Apr 21 03:39:28 rpmi: rpm-build-python3-0.1.10.10-alt1.M80P.1 1530521451 installed <13>Apr 21 03:39:28 rpmi: python-modules-unittest-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:28 rpmi: python-modules-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:28 rpmi: python-modules-xml-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:28 rpmi: python-module-six-1.10.0-alt7 1496087616 installed <13>Apr 21 03:39:28 rpmi: python-modules-encodings-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:28 rpmi: python-modules-ctypes-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:28 rpmi: python-modules-json-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:28 rpmi: python-module-webencodings-0.5.1-alt0.M80P.1 1507185481 installed <13>Apr 21 03:39:28 rpmi: python-modules-multiprocessing-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:28 rpmi: python-modules-logging-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:28 rpmi: python-module-cycler-0.10.0-alt1 1493854024 installed <13>Apr 21 03:39:28 rpmi: python-modules-hotshot-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:28 rpmi: python-modules-bsddb-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:28 rpmi: python-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:28 rpmi: python-strict-2.7.11-alt6.M80P.1 1527682534 installed <13>Apr 21 03:39:28 rpmi: python-module-dateutil-2.6.0-alt0.M80P.1 1507185893 installed <13>Apr 21 03:39:29 rpmi: python-module-pyparsing-2.0.3-alt1 1423469752 installed <13>Apr 21 03:39:29 rpmi: python-module-pycurl-7.19.5.3-alt1.1.1 1459598459 installed <13>Apr 21 03:39:29 rpmi: python-module-zc-1.0.0-alt6.2 1458024144 installed <13>Apr 21 03:39:29 rpmi: python-module-pygobject3-3.24.1-alt1.M80P.1 1504561842 installed <13>Apr 21 03:39:29 rpmi: python-module-pycairo-1.13.3-alt0.M80P.1 1504561359 installed <13>Apr 21 03:39:29 rpmi: python-dev-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:29 rpmi: python-module-backports-3.5.0.1-alt0.M80P.1 1525453027 installed <13>Apr 21 03:39:29 rpmi: python-module-backports.ssl_match_hostname-3.5.0.1-alt0.M80P.1 1525453027 installed <13>Apr 21 03:39:29 rpmi: python-module-functools32-3.2.3.2-alt1.git20150711 1438148996 installed <13>Apr 21 03:39:29 rpmi: python-module-simplejson-3.15.0-alt1.M80P.1 1528995430 installed <13>Apr 21 03:39:29 rpmi: python-modules-wsgiref-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:29 rpmi: python-tools-2to3-2.7.11-alt6.M80P.1 1527682470 installed <13>Apr 21 03:39:29 rpmi: python-module-numpy-1:1.12.1-alt0.M80P.1 1496160663 installed <13>Apr 21 03:39:29 rpmi: python-module-zope-3.3.0-alt8.4 1458261152 installed <13>Apr 21 03:39:29 rpmi: python-module-tornado-4.2.1-alt2.M80P.1 1496088637 installed <13>Apr 21 03:39:29 rpmi: python-module-numpy-testing-1:1.12.1-alt0.M80P.1 1496160663 installed <13>Apr 21 03:39:29 rpmi: 
python-module-zope.interface-4.1.3-alt1.dev0.git20150601.4 1459629874 installed <13>Apr 21 03:39:29 rpmi: python-module-html5lib-1:0.999999999-alt1.M80P.2 1507185603 installed <13>Apr 21 03:39:30 rpmi: python-module-lxml-4.0.0-alt0.M80P.1 1507163741 installed <13>Apr 21 03:39:30 rpmi: python-module-cssselect-0.9.1-alt1.1 1457858257 installed <13>Apr 21 03:39:30 rpmi: python-module-docutils-0.13-alt4.git20150716.1.1 1460400317 installed <13>Apr 21 03:39:30 rpmi: python-module-matplotlib-gtk3-2.0.0-alt3.M80P.1 1498628280 installed <13>Apr 21 03:39:31 rpmi: python-module-matplotlib-2.0.0-alt3.M80P.1 1498628280 installed <13>Apr 21 03:39:31 rpmi: python-module-matplotlib-cairo-2.0.0-alt3.M80P.1 1498628280 installed Installing python-module-theano-0.6.0-alt3.2.1.src.rpm Building target platforms: x86_64 Building for target x86_64 Executing(%prep): /bin/sh -e /usr/src/tmp/rpm-tmp.1615 + umask 022 + /bin/mkdir -p /usr/src/RPM/BUILD + cd /usr/src/RPM/BUILD + cd /usr/src/RPM/BUILD + rm -rf python-module-theano-0.6.0 + echo 'Source #0 (python-module-theano-0.6.0.tar):' Source #0 (python-module-theano-0.6.0.tar): + /bin/tar -xf /usr/src/RPM/SOURCES/python-module-theano-0.6.0.tar + cd python-module-theano-0.6.0 + /bin/chmod -c -Rf u+rwX,go-w . + cp -fR . ../python3 + exit 0 Executing(%build): /bin/sh -e /usr/src/tmp/rpm-tmp.20660 + umask 022 + /bin/mkdir -p /usr/src/RPM/BUILD + cd /usr/src/RPM/BUILD + cd python-module-theano-0.6.0 + export LC_ALL=en_US.UTF-8 + LC_ALL=en_US.UTF-8 + CFLAGS='-pipe -Wall -g -O2' + export CFLAGS + CXXFLAGS='-pipe -Wall -g -O2' + export CXXFLAGS + FFLAGS='-pipe -Wall -g -O2' + export FFLAGS + /usr/bin/python setup.py build --debug /usr/lib64/python2.7/distutils/dist.py:267: UserWarning: Unknown distribution option: 'install_requires' warnings.warn(msg) running build running build_py creating build creating build/lib creating build/lib/theano copying theano/version.py -> build/lib/theano copying theano/updates.py -> build/lib/theano copying theano/raise_op.py -> build/lib/theano copying theano/printing.py -> build/lib/theano copying theano/ifelse.py -> build/lib/theano copying theano/gradient.py -> build/lib/theano copying theano/generated_version.py -> build/lib/theano copying theano/configparser.py -> build/lib/theano copying theano/configdefaults.py -> build/lib/theano copying theano/__init__.py -> build/lib/theano creating build/lib/theano/tests copying theano/tests/unittest_tools.py -> build/lib/theano/tests copying theano/tests/test_updates.py -> build/lib/theano/tests copying theano/tests/test_tutorial.py -> build/lib/theano/tests copying theano/tests/test_rop.py -> build/lib/theano/tests copying theano/tests/test_printing.py -> build/lib/theano/tests copying theano/tests/test_ifelse.py -> build/lib/theano/tests copying theano/tests/test_gradient.py -> build/lib/theano/tests copying theano/tests/test_determinism.py -> build/lib/theano/tests copying theano/tests/test_config.py -> build/lib/theano/tests copying theano/tests/test_2nd_order_grads.py -> build/lib/theano/tests copying theano/tests/run_tests_in_batch.py -> build/lib/theano/tests copying theano/tests/record.py -> build/lib/theano/tests copying theano/tests/main.py -> build/lib/theano/tests copying theano/tests/diverse_tests.py -> build/lib/theano/tests copying theano/tests/disturb_mem.py -> build/lib/theano/tests copying theano/tests/__init__.py -> build/lib/theano/tests creating build/lib/theano/tensor copying theano/tensor/xlogx.py -> build/lib/theano/tensor copying theano/tensor/var.py -> 
build/lib/theano/tensor copying theano/tensor/utils.py -> build/lib/theano/tensor copying theano/tensor/type_other.py -> build/lib/theano/tensor copying theano/tensor/type.py -> build/lib/theano/tensor copying theano/tensor/subtensor.py -> build/lib/theano/tensor copying theano/tensor/sort.py -> build/lib/theano/tensor copying theano/tensor/sharedvar.py -> build/lib/theano/tensor copying theano/tensor/shared_randomstreams.py -> build/lib/theano/tensor copying theano/tensor/raw_random.py -> build/lib/theano/tensor copying theano/tensor/randomstreams.py -> build/lib/theano/tensor copying theano/tensor/opt_uncanonicalize.py -> build/lib/theano/tensor copying theano/tensor/opt.py -> build/lib/theano/tensor copying theano/tensor/io.py -> build/lib/theano/tensor copying theano/tensor/inplace.py -> build/lib/theano/tensor copying theano/tensor/fourier.py -> build/lib/theano/tensor copying theano/tensor/extra_ops.py -> build/lib/theano/tensor copying theano/tensor/elemwise_cgen.py -> build/lib/theano/tensor copying theano/tensor/elemwise.py -> build/lib/theano/tensor copying theano/tensor/blas_scipy.py -> build/lib/theano/tensor copying theano/tensor/blas_headers.py -> build/lib/theano/tensor copying theano/tensor/blas_c.py -> build/lib/theano/tensor copying theano/tensor/blas.py -> build/lib/theano/tensor copying theano/tensor/basic.py -> build/lib/theano/tensor copying theano/tensor/__init__.py -> build/lib/theano/tensor creating build/lib/theano/sparse copying theano/sparse/utils.py -> build/lib/theano/sparse copying theano/sparse/type.py -> build/lib/theano/sparse copying theano/sparse/sharedvar.py -> build/lib/theano/sparse copying theano/sparse/opt.py -> build/lib/theano/sparse copying theano/sparse/basic.py -> build/lib/theano/sparse copying theano/sparse/__init__.py -> build/lib/theano/sparse creating build/lib/theano/scan_module copying theano/scan_module/scan_views.py -> build/lib/theano/scan_module copying theano/scan_module/scan_utils.py -> build/lib/theano/scan_module copying theano/scan_module/scan_perform_ext.py -> build/lib/theano/scan_module copying theano/scan_module/scan_opt.py -> build/lib/theano/scan_module copying theano/scan_module/scan_op.py -> build/lib/theano/scan_module copying theano/scan_module/scan.py -> build/lib/theano/scan_module copying theano/scan_module/__init__.py -> build/lib/theano/scan_module creating build/lib/theano/scalar copying theano/scalar/sharedvar.py -> build/lib/theano/scalar copying theano/scalar/basic_sympy.py -> build/lib/theano/scalar copying theano/scalar/basic_scipy.py -> build/lib/theano/scalar copying theano/scalar/basic.py -> build/lib/theano/scalar copying theano/scalar/__init__.py -> build/lib/theano/scalar creating build/lib/theano/sandbox copying theano/sandbox/theano_object.py -> build/lib/theano/sandbox copying theano/sandbox/test_theano_object.py -> build/lib/theano/sandbox copying theano/sandbox/test_scan.py -> build/lib/theano/sandbox copying theano/sandbox/test_rng_mrg.py -> build/lib/theano/sandbox copying theano/sandbox/test_neighbours.py -> build/lib/theano/sandbox copying theano/sandbox/test_neighbourhoods.py -> build/lib/theano/sandbox copying theano/sandbox/test_multinomial.py -> build/lib/theano/sandbox copying theano/sandbox/symbolic_module.py -> build/lib/theano/sandbox copying theano/sandbox/solve.py -> build/lib/theano/sandbox copying theano/sandbox/softsign.py -> build/lib/theano/sandbox copying theano/sandbox/scan.py -> build/lib/theano/sandbox copying theano/sandbox/rng_mrg.py -> build/lib/theano/sandbox copying 
theano/sandbox/neighbours.py -> build/lib/theano/sandbox copying theano/sandbox/neighbourhoods.py -> build/lib/theano/sandbox copying theano/sandbox/multinomial.py -> build/lib/theano/sandbox copying theano/sandbox/minimal.py -> build/lib/theano/sandbox copying theano/sandbox/fourier.py -> build/lib/theano/sandbox copying theano/sandbox/downsample.py -> build/lib/theano/sandbox copying theano/sandbox/debug.py -> build/lib/theano/sandbox copying theano/sandbox/conv.py -> build/lib/theano/sandbox copying theano/sandbox/__init__.py -> build/lib/theano/sandbox creating build/lib/theano/misc copying theano/misc/windows.py -> build/lib/theano/misc copying theano/misc/strutil.py -> build/lib/theano/misc copying theano/misc/safe_asarray.py -> build/lib/theano/misc copying theano/misc/pycuda_utils.py -> build/lib/theano/misc copying theano/misc/pycuda_init.py -> build/lib/theano/misc copying theano/misc/pycuda_example.py -> build/lib/theano/misc copying theano/misc/pkl_utils.py -> build/lib/theano/misc copying theano/misc/ordered_set.py -> build/lib/theano/misc copying theano/misc/nose_pr.py -> build/lib/theano/misc copying theano/misc/may_share_memory.py -> build/lib/theano/misc copying theano/misc/latence_gpu_transfert.py -> build/lib/theano/misc copying theano/misc/gnumpy_utils.py -> build/lib/theano/misc copying theano/misc/gh_api.py -> build/lib/theano/misc copying theano/misc/doubleop.py -> build/lib/theano/misc copying theano/misc/cudamat_utils.py -> build/lib/theano/misc copying theano/misc/cpucount.py -> build/lib/theano/misc copying theano/misc/check_duplicate_key.py -> build/lib/theano/misc copying theano/misc/check_blas.py -> build/lib/theano/misc copying theano/misc/buildbot_filter.py -> build/lib/theano/misc copying theano/misc/__init__.py -> build/lib/theano/misc creating build/lib/theano/gof copying theano/gof/vm.py -> build/lib/theano/gof copying theano/gof/utils.py -> build/lib/theano/gof copying theano/gof/unify.py -> build/lib/theano/gof copying theano/gof/type.py -> build/lib/theano/gof copying theano/gof/toolbox.py -> build/lib/theano/gof copying theano/gof/sched.py -> build/lib/theano/gof copying theano/gof/python25.py -> build/lib/theano/gof copying theano/gof/optdb.py -> build/lib/theano/gof copying theano/gof/opt.py -> build/lib/theano/gof copying theano/gof/op.py -> build/lib/theano/gof copying theano/gof/null_type.py -> build/lib/theano/gof copying theano/gof/link.py -> build/lib/theano/gof copying theano/gof/lazylinker_c.py -> build/lib/theano/gof copying theano/gof/graph.py -> build/lib/theano/gof copying theano/gof/fg.py -> build/lib/theano/gof copying theano/gof/destroyhandler.py -> build/lib/theano/gof copying theano/gof/cutils.py -> build/lib/theano/gof copying theano/gof/compilelock.py -> build/lib/theano/gof copying theano/gof/compiledir.py -> build/lib/theano/gof copying theano/gof/cmodule.py -> build/lib/theano/gof copying theano/gof/cc.py -> build/lib/theano/gof copying theano/gof/callcache.py -> build/lib/theano/gof copying theano/gof/__init__.py -> build/lib/theano/gof creating build/lib/theano/compile copying theano/compile/sharedvalue.py -> build/lib/theano/compile copying theano/compile/profiling.py -> build/lib/theano/compile copying theano/compile/profilemode.py -> build/lib/theano/compile copying theano/compile/pfunc.py -> build/lib/theano/compile copying theano/compile/ops.py -> build/lib/theano/compile copying theano/compile/monitormode.py -> build/lib/theano/compile copying theano/compile/module.py -> build/lib/theano/compile copying 
theano/compile/mode.py -> build/lib/theano/compile copying theano/compile/io.py -> build/lib/theano/compile copying theano/compile/function_module.py -> build/lib/theano/compile copying theano/compile/function.py -> build/lib/theano/compile copying theano/compile/debugmode.py -> build/lib/theano/compile copying theano/compile/builders.py -> build/lib/theano/compile copying theano/compile/__init__.py -> build/lib/theano/compile creating build/lib/theano/compat copying theano/compat/six.py -> build/lib/theano/compat copying theano/compat/python2x.py -> build/lib/theano/compat copying theano/compat/__init__.py -> build/lib/theano/compat creating build/lib/theano/tensor/tests copying theano/tensor/tests/test_xlogx.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_utils.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_subtensor.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_sort.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_sharedvar.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_shared_randomstreams.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_raw_random.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_randomstreams.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_opt_uncanonicalize.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_opt.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_naacl09.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_mpi.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_misc.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_merge.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_keepdims.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_io.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_inc_subtensor.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_gc.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_fourier.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_extra_ops.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_elemwise.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_complex.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_casting.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_blas_scipy.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_blas_c.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_blas.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/test_basic.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/mlp_test.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/_test_mpi_roundtrip.py -> build/lib/theano/tensor/tests copying theano/tensor/tests/__init__.py -> build/lib/theano/tensor/tests creating build/lib/theano/tensor/signal copying theano/tensor/signal/downsample.py -> build/lib/theano/tensor/signal copying theano/tensor/signal/conv.py -> build/lib/theano/tensor/signal copying theano/tensor/signal/__init__.py -> build/lib/theano/tensor/signal creating build/lib/theano/tensor/nnet copying theano/tensor/nnet/sigm.py -> build/lib/theano/tensor/nnet copying theano/tensor/nnet/nnet.py -> build/lib/theano/tensor/nnet copying theano/tensor/nnet/conv3d2d.py -> build/lib/theano/tensor/nnet copying theano/tensor/nnet/conv.py -> 
build/lib/theano/tensor/nnet copying theano/tensor/nnet/__init__.py -> build/lib/theano/tensor/nnet copying theano/tensor/nnet/ConvTransp3D.py -> build/lib/theano/tensor/nnet copying theano/tensor/nnet/ConvGrad3D.py -> build/lib/theano/tensor/nnet copying theano/tensor/nnet/Conv3D.py -> build/lib/theano/tensor/nnet creating build/lib/theano/tensor/deprecated copying theano/tensor/deprecated/test_rmodule.py -> build/lib/theano/tensor/deprecated copying theano/tensor/deprecated/rmodule.py -> build/lib/theano/tensor/deprecated copying theano/tensor/deprecated/__init__.py -> build/lib/theano/tensor/deprecated creating build/lib/theano/sparse/tests copying theano/sparse/tests/test_utils.py -> build/lib/theano/sparse/tests copying theano/sparse/tests/test_sp2.py -> build/lib/theano/sparse/tests copying theano/sparse/tests/test_opt.py -> build/lib/theano/sparse/tests copying theano/sparse/tests/test_basic.py -> build/lib/theano/sparse/tests copying theano/sparse/tests/__init__.py -> build/lib/theano/sparse/tests creating build/lib/theano/sparse/sandbox copying theano/sparse/sandbox/truedot.py -> build/lib/theano/sparse/sandbox copying theano/sparse/sandbox/test_sp.py -> build/lib/theano/sparse/sandbox copying theano/sparse/sandbox/sp2.py -> build/lib/theano/sparse/sandbox copying theano/sparse/sandbox/sp.py -> build/lib/theano/sparse/sandbox copying theano/sparse/sandbox/__init__.py -> build/lib/theano/sparse/sandbox creating build/lib/theano/scan_module/tests copying theano/scan_module/tests/test_scan_opt.py -> build/lib/theano/scan_module/tests copying theano/scan_module/tests/test_scan.py -> build/lib/theano/scan_module/tests copying theano/scan_module/tests/__init__.py -> build/lib/theano/scan_module/tests creating build/lib/theano/scalar/tests copying theano/scalar/tests/test_basic_sympy.py -> build/lib/theano/scalar/tests copying theano/scalar/tests/test_basic.py -> build/lib/theano/scalar/tests copying theano/scalar/tests/__init__.py -> build/lib/theano/scalar/tests creating build/lib/theano/sandbox/scan_module copying theano/sandbox/scan_module/scan_utils.py -> build/lib/theano/sandbox/scan_module copying theano/sandbox/scan_module/scan_op.py -> build/lib/theano/sandbox/scan_module copying theano/sandbox/scan_module/scan.py -> build/lib/theano/sandbox/scan_module copying theano/sandbox/scan_module/__init__.py -> build/lib/theano/sandbox/scan_module creating build/lib/theano/sandbox/linalg copying theano/sandbox/linalg/ops.py -> build/lib/theano/sandbox/linalg copying theano/sandbox/linalg/kron.py -> build/lib/theano/sandbox/linalg copying theano/sandbox/linalg/__init__.py -> build/lib/theano/sandbox/linalg creating build/lib/theano/sandbox/gpuarray copying theano/sandbox/gpuarray/type.py -> build/lib/theano/sandbox/gpuarray copying theano/sandbox/gpuarray/subtensor.py -> build/lib/theano/sandbox/gpuarray copying theano/sandbox/gpuarray/opt.py -> build/lib/theano/sandbox/gpuarray copying theano/sandbox/gpuarray/elemwise.py -> build/lib/theano/sandbox/gpuarray copying theano/sandbox/gpuarray/blas.py -> build/lib/theano/sandbox/gpuarray copying theano/sandbox/gpuarray/basic_ops.py -> build/lib/theano/sandbox/gpuarray copying theano/sandbox/gpuarray/__init__.py -> build/lib/theano/sandbox/gpuarray creating build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/var.py -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/type.py -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/rng_curand.py -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/opt.py -> 
build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/nvcc_compiler.py -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/nnet.py -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/neighbours.py -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/kernel_codegen.py -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/elemwise.py -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/blas.py -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/basic_ops.py -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/__init__.py -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/GpuConvTransp3D.py -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/GpuConvGrad3D.py -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/GpuConv3D.py -> build/lib/theano/sandbox/cuda creating build/lib/theano/misc/tests copying theano/misc/tests/test_pycuda_utils.py -> build/lib/theano/misc/tests copying theano/misc/tests/test_pycuda_theano_simple.py -> build/lib/theano/misc/tests copying theano/misc/tests/test_pycuda_example.py -> build/lib/theano/misc/tests copying theano/misc/tests/test_may_share_memory.py -> build/lib/theano/misc/tests copying theano/misc/tests/test_gnumpy_utils.py -> build/lib/theano/misc/tests copying theano/misc/tests/test_cudamat_utils.py -> build/lib/theano/misc/tests copying theano/misc/tests/__init__.py -> build/lib/theano/misc/tests creating build/lib/theano/gof/tests copying theano/gof/tests/test_vm.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_utils.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_types.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_toolbox.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_sched.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_optdb.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_opt.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_op.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_link.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_lazy.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_graph.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_fg.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_destroyhandler.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_compute_test_value.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_cmodule.py -> build/lib/theano/gof/tests copying theano/gof/tests/test_cc.py -> build/lib/theano/gof/tests copying theano/gof/tests/__init__.py -> build/lib/theano/gof/tests creating build/lib/theano/compile/tests copying theano/compile/tests/test_shared.py -> build/lib/theano/compile/tests copying theano/compile/tests/test_pfunc.py -> build/lib/theano/compile/tests copying theano/compile/tests/test_monitormode.py -> build/lib/theano/compile/tests copying theano/compile/tests/test_module.py -> build/lib/theano/compile/tests copying theano/compile/tests/test_modes.py -> build/lib/theano/compile/tests copying theano/compile/tests/test_misc.py -> build/lib/theano/compile/tests copying theano/compile/tests/test_inplace_opt_for_value.py -> build/lib/theano/compile/tests copying theano/compile/tests/test_function_module.py -> build/lib/theano/compile/tests copying theano/compile/tests/test_debugmode.py -> build/lib/theano/compile/tests copying theano/compile/tests/test_builders.py -> build/lib/theano/compile/tests copying theano/compile/tests/__init__.py -> 
build/lib/theano/compile/tests creating build/lib/theano/compile/sandbox copying theano/compile/sandbox/__init__.py -> build/lib/theano/compile/sandbox creating build/lib/theano/tensor/signal/tests copying theano/tensor/signal/tests/test_downsample.py -> build/lib/theano/tensor/signal/tests copying theano/tensor/signal/tests/test_conv.py -> build/lib/theano/tensor/signal/tests copying theano/tensor/signal/tests/__init__.py -> build/lib/theano/tensor/signal/tests creating build/lib/theano/tensor/nnet/tests copying theano/tensor/nnet/tests/test_sigm.py -> build/lib/theano/tensor/nnet/tests copying theano/tensor/nnet/tests/test_nnet.py -> build/lib/theano/tensor/nnet/tests copying theano/tensor/nnet/tests/test_conv3d2d.py -> build/lib/theano/tensor/nnet/tests copying theano/tensor/nnet/tests/test_conv3d.py -> build/lib/theano/tensor/nnet/tests copying theano/tensor/nnet/tests/test_conv.py -> build/lib/theano/tensor/nnet/tests copying theano/tensor/nnet/tests/speed_test_conv.py -> build/lib/theano/tensor/nnet/tests copying theano/tensor/nnet/tests/__init__.py -> build/lib/theano/tensor/nnet/tests creating build/lib/theano/sandbox/scan_module/tests copying theano/sandbox/scan_module/tests/test_utils.py -> build/lib/theano/sandbox/scan_module/tests copying theano/sandbox/scan_module/tests/test_scan.py -> build/lib/theano/sandbox/scan_module/tests copying theano/sandbox/scan_module/tests/__init__.py -> build/lib/theano/sandbox/scan_module/tests creating build/lib/theano/sandbox/linalg/tests copying theano/sandbox/linalg/tests/test_linalg.py -> build/lib/theano/sandbox/linalg/tests copying theano/sandbox/linalg/tests/test_kron.py -> build/lib/theano/sandbox/linalg/tests copying theano/sandbox/linalg/tests/__init__.py -> build/lib/theano/sandbox/linalg/tests creating build/lib/theano/sandbox/gpuarray/tests copying theano/sandbox/gpuarray/tests/test_type.py -> build/lib/theano/sandbox/gpuarray/tests copying theano/sandbox/gpuarray/tests/test_subtensor.py -> build/lib/theano/sandbox/gpuarray/tests copying theano/sandbox/gpuarray/tests/test_opt.py -> build/lib/theano/sandbox/gpuarray/tests copying theano/sandbox/gpuarray/tests/test_elemwise.py -> build/lib/theano/sandbox/gpuarray/tests copying theano/sandbox/gpuarray/tests/test_blas.py -> build/lib/theano/sandbox/gpuarray/tests copying theano/sandbox/gpuarray/tests/test_basic_ops.py -> build/lib/theano/sandbox/gpuarray/tests copying theano/sandbox/gpuarray/tests/__init__.py -> build/lib/theano/sandbox/gpuarray/tests creating build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/walltime.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_viewop.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_var.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_tensor_op.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_rng_curand.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_opt.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_nvcc_compiler.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_nnet.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_neighbours.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_mlp.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_memory.py -> build/lib/theano/sandbox/cuda/tests copying 
theano/sandbox/cuda/tests/test_gradient.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_driver.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_cuda_ndarray.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_blas.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_bench_loopfusion.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/test_basic_ops.py -> build/lib/theano/sandbox/cuda/tests copying theano/sandbox/cuda/tests/__init__.py -> build/lib/theano/sandbox/cuda/tests copying theano/scan_module/scan_perform.c -> build/lib/theano/scan_module copying theano/sandbox/samples_MRG31k3p_12_7_5.txt -> build/lib/theano/sandbox copying theano/misc/check_blas_many.sh -> build/lib/theano/misc copying theano/gof/lazylinker_c.c -> build/lib/theano/gof copying theano/tensor/tests/shape_opt_cycle.pkl -> build/lib/theano/tensor/tests copying theano/sandbox/cuda/cuda_ndarray.cu -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/conv_kernel.cu -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/conv_full_kernel.cu -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/conv.cu -> build/lib/theano/sandbox/cuda copying theano/sandbox/cuda/cuda_ndarray.cuh -> build/lib/theano/sandbox/cuda running build_scripts creating build/scripts-2.7 copying and adjusting bin/theano-cache -> build/scripts-2.7 copying and adjusting bin/theano-nose -> build/scripts-2.7 copying and adjusting bin/theano-test -> build/scripts-2.7 changing mode of build/scripts-2.7/theano-cache from 644 to 755 changing mode of build/scripts-2.7/theano-nose from 644 to 755 changing mode of build/scripts-2.7/theano-test from 644 to 755 + pushd ../python3 ~/RPM/BUILD/python3 ~/RPM/BUILD/python-module-theano-0.6.0 + find -type f -name '*.py' -exec 2to3 -w -n '{}' + RefactoringTool: Skipping optional fixer: buffer RefactoringTool: Skipping optional fixer: idioms RefactoringTool: Skipping optional fixer: set_literal RefactoringTool: Skipping optional fixer: ws_comma RefactoringTool: No changes to ./theano/version.py RefactoringTool: Refactored ./theano/updates.py RefactoringTool: Refactored ./theano/tests/unittest_tools.py --- ./theano/updates.py (original) +++ ./theano/updates.py (refactored) @@ -69,7 +69,7 @@ 'non-ordered dictionary with 2+ elements could ' 'make your code non-deterministic', stacklevel=2) - for key, val in OrderedDict(other).iteritems(): + for key, val in OrderedDict(other).items(): if key in self: if self[key] == val: continue --- ./theano/tests/unittest_tools.py (original) +++ ./theano/tests/unittest_tools.py (refactored) @@ -1,6 +1,6 @@ from copy import copy, deepcopy import logging -from StringIO import StringIO +from io import StringIO import sys import unittest @@ -58,8 +58,8 @@ else: seed = None except ValueError: - print >> sys.stderr, ('Error: config.unittests.rseed contains ' - 'invalid seed, using None instead') + print(('Error: config.unittests.rseed contains ' + 'invalid seed, using None instead'), file=sys.stderr) seed = None return seed @@ -73,8 +73,8 @@ seed = fetch_seed(pseed) if pseed and pseed != seed: - print >> sys.stderr, 'Warning: using seed given by config.unittests.rseed=%i'\ - 'instead of seed %i given as parameter' % (seed, pseed) + print('Warning: using seed given by config.unittests.rseed=%i'\ + 'instead of seed %i given as parameter' % 
(seed, pseed), file=sys.stderr) numpy.random.seed(seed) return seed @@ -103,7 +103,7 @@ class TestOptimizationMixin(object): - def assertFunctionContains(self, f, op, min=1, max=sys.maxint): + def assertFunctionContains(self, f, op, min=1, max=sys.maxsize): toposort = f.maker.fgraph.toposort() matches = [node for node in toposort if node.op == op] assert (min <= len(matches) <= max), (toposort, matches, @@ -118,7 +118,7 @@ def assertFunctionContainsN(self, f, op, N): return self.assertFunctionContains(f, op, min=N, max=N) - def assertFunctionContainsClass(self, f, op, min=1, max=sys.maxint): + def assertFunctionContainsClass(self, f, op, min=1, max=sys.maxsize): toposort = f.maker.fgraph.toposort() matches = [node for node in toposort if isinstance(node.op, op)] assert (min <= len(matches) <= max), (toposort, matches, @@ -254,54 +254,54 @@ try: ssio = StringIO() - print >> ssio, " : shape, dtype, strides, min, max, n_inf, n_nan:" - print >> ssio, " Expected :", - print >> ssio, expected.shape, - print >> ssio, expected.dtype, - print >> ssio, expected.strides, - print >> ssio, expected.min(), - print >> ssio, expected.max(), - print >> ssio, numpy.isinf(expected).sum(), - print >> ssio, numpy.isnan(expected).sum(), + print(" : shape, dtype, strides, min, max, n_inf, n_nan:", file=ssio) + print(" Expected :", end=' ', file=ssio) + print(expected.shape, end=' ', file=ssio) + print(expected.dtype, end=' ', file=ssio) + print(expected.strides, end=' ', file=ssio) + print(expected.min(), end=' ', file=ssio) + print(expected.max(), end=' ', file=ssio) + print(numpy.isinf(expected).sum(), end=' ', file=ssio) + print(numpy.isnan(expected).sum(), end=' ', file=ssio) # only if all succeeds to we add anything to sio - print >> sio, ssio.getvalue() + print(ssio.getvalue(), file=sio) except Exception: pass try: ssio = StringIO() - print >> ssio, " Value :", - print >> ssio, value.shape, - print >> ssio, value.dtype, - print >> ssio, value.strides, - print >> ssio, value.min(), - print >> ssio, value.max(), - print >> ssio, numpy.isinf(value).sum(), - print >> ssio, numpy.isnan(value).sum(), + print(" Value :", end=' ', file=ssio) + print(value.shape, end=' ', file=ssio) RefactoringTool: No changes to ./theano/tests/test_updates.py RefactoringTool: Refactored ./theano/tests/test_tutorial.py + print(value.dtype, end=' ', file=ssio) + print(value.strides, end=' ', file=ssio) + print(value.min(), end=' ', file=ssio) + print(value.max(), end=' ', file=ssio) + print(numpy.isinf(value).sum(), end=' ', file=ssio) + print(numpy.isnan(value).sum(), end=' ', file=ssio) # only if all succeeds to we add anything to sio - print >> sio, ssio.getvalue() + print(ssio.getvalue(), file=sio) except Exception: pass - print >> sio, " expected :", expected - print >> sio, " value :", value + print(" expected :", expected, file=sio) + print(" value :", value, file=sio) try: ov = numpy.asarray(expected) nv = numpy.asarray(value) ssio = StringIO() absdiff = numpy.absolute(nv - ov) - print >> ssio, " Max Abs Diff: ", numpy.max(absdiff) - print >> ssio, " Mean Abs Diff: ", numpy.mean(absdiff) - print >> ssio, " Median Abs Diff: ", numpy.median(absdiff) - print >> ssio, " Std Abs Diff: ", numpy.std(absdiff) + print(" Max Abs Diff: ", numpy.max(absdiff), file=ssio) + print(" Mean Abs Diff: ", numpy.mean(absdiff), file=ssio) + print(" Median Abs Diff: ", numpy.median(absdiff), file=ssio) + print(" Std Abs Diff: ", numpy.std(absdiff), file=ssio) reldiff = numpy.absolute(nv - ov) / (numpy.absolute(nv) + numpy.absolute(ov)) - print >> 
ssio, " Max Rel Diff: ", numpy.max(reldiff) - print >> ssio, " Mean Rel Diff: ", numpy.mean(reldiff) - print >> ssio, " Median Rel Diff: ", numpy.median(reldiff) - print >> ssio, " Std Rel Diff: ", numpy.std(reldiff) + print(" Max Rel Diff: ", numpy.max(reldiff), file=ssio) + print(" Mean Rel Diff: ", numpy.mean(reldiff), file=ssio) + print(" Median Rel Diff: ", numpy.median(reldiff), file=ssio) + print(" Std Rel Diff: ", numpy.std(reldiff), file=ssio) # only if all succeeds to we add anything to sio - print >> sio, ssio.getvalue() + print(ssio.getvalue(), file=sio) except Exception: pass #Use the same formula as in _allclose to find the tolerance used @@ -317,7 +317,7 @@ rtol_ = rtol if atol is not None: atol_ = atol - print >> sio, " rtol, atol:", rtol_, atol_ + print(" rtol, atol:", rtol_, atol_, file=sio) return sio.getvalue() --- ./theano/tests/test_tutorial.py (original) +++ ./theano/tests/test_tutorial.py (refactored) @@ -713,7 +713,7 @@ def test_loading_and_saving_1(self): - import cPickle + import pickle import theano, theano.tensor x = theano.tensor.matrix() @@ -734,12 +734,12 @@ os.chdir(tmpdir) f = open('obj.save', 'wb') - cPickle.dump(my_obj, f, protocol=cPickle.HIGHEST_PROTOCOL) + pickle.dump(my_obj, f, protocol=pickle.HIGHEST_PROTOCOL) f.close() f = open('obj.save', 'rb') - loaded_obj = cPickle.load(f) + loaded_obj = pickle.load(f) f.close() obj1 = my_obj @@ -748,13 +748,13 @@ f = open('objects.save', 'wb') for obj in [obj1, obj2, obj3]: - cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL) + pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL) f.close() f = open('objects.save', 'rb') loaded_objects = [] for i in range(3): - loaded_objects.append(cPickle.load(f)) + loaded_objects.append(pickle.load(f)) f.close() finally: # Get back to the orinal dir, and temporary one. 
@@ -800,15 +800,15 @@ f = function([], T.exp(x)) # print f.maker.fgraph.toposort() t0 = time.time() - for i in xrange(iters): + for i in range(iters): RefactoringTool: No changes to ./theano/tests/test_rop.py RefactoringTool: Refactored ./theano/tests/test_printing.py RefactoringTool: Refactored ./theano/tests/test_ifelse.py r = f() t1 = time.time() - print 'Looping %d times took' % iters, t1 - t0, 'seconds' - print 'Result is', r + print('Looping %d times took' % iters, t1 - t0, 'seconds') + print('Result is', r) if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]): - print 'Used the cpu' + print('Used the cpu') else: - print 'Used the gpu' + print('Used the gpu') if theano.config.device.find('gpu') > -1: assert not numpy.any( [isinstance(x.op,T.Elemwise) for x in f.maker.fgraph.toposort()]) else: @@ -832,16 +832,16 @@ f = function([], sandbox.cuda.basic_ops.gpu_from_host(T.exp(x))) # print f.maker.fgraph.toposort() t0 = time.time() - for i in xrange(iters): + for i in range(iters): r = f() t1 = time.time() - print 'Looping %d times took' % iters, t1 - t0, 'seconds' - print 'Result is', r - print 'Numpy result is', numpy.asarray(r) + print('Looping %d times took' % iters, t1 - t0, 'seconds') + print('Result is', r) + print('Numpy result is', numpy.asarray(r)) if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]): - print 'Used the cpu' + print('Used the cpu') else: - print 'Used the gpu' + print('Used the gpu') assert not numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]) @@ -864,17 +864,17 @@ borrow=True)) # print f.maker.fgraph.toposort() t0 = time.time() - for i in xrange(iters): + for i in range(iters): r = f() t1 = time.time() - print 'Looping %d times took' % iters, t1 - t0, 'seconds' - print 'Result is', r - print 'Numpy result is', numpy.asarray(r) + print('Looping %d times took' % iters, t1 - t0, 'seconds') + print('Result is', r) + print('Numpy result is', numpy.asarray(r)) if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]): - print 'Used the cpu' + print('Used the cpu') else: - print 'Used the gpu' + print('Used the gpu') assert not numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()])
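The hunks above and below are standard 2to3 fixer output: Python 2 idioms on the '-' lines (print statements, print >> redirection, xrange, dict.iteritems, cPickle, StringIO imported from the StringIO module, sys.maxint) are rewritten to their Python 3 spellings on the '+' lines. A minimal standalone sketch of the converted idioms, assuming Python 3 (illustrative only; the names below are made up, not taken from the Theano sources or from this log):

    import pickle                    # 2to3 turns 'import cPickle' into 'import pickle'
    import sys
    from io import StringIO          # 'from StringIO import StringIO' becomes 'from io import StringIO'

    BIG = sys.maxsize                # 'sys.maxint' (removed in Python 3) becomes 'sys.maxsize'
    assert BIG >= 2 ** 31 - 1
    buf = StringIO()
    for i in range(3):               # 'xrange(n)' becomes 'range(n)'
        print('step', i, file=buf)   # 'print >> buf, ...' becomes 'print(..., file=buf)'
    data = {'a': 1, 'b': 2}
    for key, val in data.items():    # 'dict.iteritems()' becomes 'dict.items()'
        print(key, val, file=sys.stderr)
    payload = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
    assert pickle.loads(payload) == data
    print(buf.getvalue(), end='')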
--- ./theano/tests/test_printing.py (original) +++ ./theano/tests/test_printing.py (refactored) @@ -107,8 +107,8 @@ E.
E""" if mis != reference: - print '--' + mis + '--' - print '--' + reference + '--' + print('--' + mis + '--') + print('--' + reference + '--') assert mis == reference @@ -142,8 +142,8 @@ """ if s != reference: - print '--' + s + '--' - print '--' + reference + '--' + print('--' + s + '--') + print('--' + reference + '--') assert s == reference @@ -162,8 +162,8 @@ """ if s != reference: - print '--' + s + '--' - print '--' + reference + '--' + print('--' + s + '--') + print('--' + reference + '--') assert s == reference @@ -180,8 +180,8 @@ """ if s != reference: - print '--' + s + '--' - print '--' + reference + '--' + print('--' + s + '--') + print('--' + reference + '--') assert s == reference @@ -199,7 +199,7 @@ |E """ if s != reference: - print '--' + s + '--' - print '--' + reference + '--' - - assert s == reference + print('--' + s + '--') + print('--' + reference + '--') + + assert s == reference --- ./theano/tests/test_ifelse.py (original) +++ ./theano/tests/test_ifelse.py (refactored) @@ -426,4 +426,4 @@ if __name__ == '__main__': RefactoringTool: Refactored ./theano/tests/test_gradient.py RefactoringTool: Refactored ./theano/tests/test_determinism.py RefactoringTool: No changes to ./theano/tests/test_config.py RefactoringTool: Refactored ./theano/tests/test_2nd_order_grads.py RefactoringTool: Refactored ./theano/tests/run_tests_in_batch.py - print ' Use nosetests to run these tests ' + print(' Use nosetests to run these tests ') --- ./theano/tests/test_gradient.py (original) +++ ./theano/tests/test_gradient.py (refactored) @@ -22,8 +22,8 @@ """ if inputs is None: inputs = theano.gof.graph.inputs([source[0] for source in sources]) - return dict(zip(inputs,theano.gradient.grad(cost=None, known_grads=dict(sources), - wrt=inputs, consider_constant=inputs))) + return dict(list(zip(inputs,theano.gradient.grad(cost=None, known_grads=dict(sources), + wrt=inputs, consider_constant=inputs)))) class testgrad_sources_inputs(unittest.TestCase): @@ -460,9 +460,9 @@ true_grads = true_grads(*values) for layer in layers: - print 'Testing by separately computing ',layer + print('Testing by separately computing ',layer) first = theano.tensor.grad(cost, layer, disconnected_inputs='ignore') - known = dict(zip(layer, first)) + known = dict(list(zip(layer, first))) full = theano.tensor.grad(cost=None, known_grads=known,wrt=inputs, disconnected_inputs='ignore') full = theano.function(inputs, full) @@ -470,13 +470,13 @@ assert len(true_grads) == len(full) for a, b, var in zip(true_grads, full, inputs): if not np.allclose(a, b): - print 'Failure' - print a - print b - print var - print layer + print('Failure') + print(a) + print(b) + print(var) + print(layer) for v in known: - print v,':',theano.function(inputs,known[v])(*values) + print(v,':',theano.function(inputs,known[v])(*values)) assert False def test_dxdx(): --- ./theano/tests/test_determinism.py (original) +++ ./theano/tests/test_determinism.py (refactored) @@ -82,7 +82,7 @@ log = run(0) # Do several trials, since failure doesn't always occur # (Sometimes you sample the same outcome twice in a row) - for i in xrange(10): + for i in range(10): run(1, log) if __name__ == '__main__':
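In the test_gradient.py hunks above, the 2to3 zip fixer wraps zip(...) in list(...) because zip returns a lazy iterator on Python 3; when the result is passed straight to dict(), the extra list() is redundant but harmless. A tiny standalone check, assuming Python 3 (illustrative, not from the Theano sources):

    inputs = ['x', 'y']
    grads = [1.0, 2.0]
    # dict() consumes the zip iterator either way, so both spellings build the same mapping.
    assert dict(zip(inputs, grads)) == dict(list(zip(inputs, grads))) == {'x': 1.0, 'y': 2.0}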
# test when the jacobian is called with a tensor as wrt @@ -90,7 +90,7 @@ vJs = f(vx, vz) evx = numpy.zeros((10, 10, 10)) evz = numpy.zeros((10, 10, 10)) - for dx in xrange(10): + for dx in range(10): evx[dx, dx, :] = vx[dx, :] evz[dx, dx, :] = vz[dx, :] assert numpy.allclose(vJs[0], evz) --- ./theano/tests/run_tests_in_batch.py (original) +++ ./theano/tests/run_tests_in_batch.py (refactored) @@ -54,7 +54,7 @@ """ -import cPickle +import pickle import datetime import os import subprocess @@ -131,10 +131,10 @@ os.remove('.noseids') # Collect test IDs. - print """\ + print("""\ #################### # COLLECTING TESTS # -####################""" +####################""") stdout.flush() stderr.flush() dummy_in = open(os.devnull) @@ -152,7 +152,7 @@ stderr.flush() assert rval == 0 noseids_file = '.noseids' - data = cPickle.load(open(noseids_file, 'rb')) + data = pickle.load(open(noseids_file, 'rb')) ids = data['ids'] n_tests = len(ids) if n_tests == 0: @@ -162,20 +162,20 @@ # Standard batch testing is called for if not time_profile: failed = set() - print """\ + print("""\ ################################### # RUNNING TESTS IN BATCHES OF %s # -###################################""" % batch_size +###################################""" % batch_size) RefactoringTool: Refactored ./theano/tests/record.py # When `display_batch_output` is False, we suppress all output because # we want the user to focus only on the failed tests, which are re-run # (with output) below. dummy_out = open(os.devnull, 'w') - for test_id in xrange(1, n_tests + 1, batch_size): + for test_id in range(1, n_tests + 1, batch_size): stdout.flush() stderr.flush() - test_range = range(test_id, min(test_id + batch_size, n_tests + 1)) + test_range = list(range(test_id, min(test_id + batch_size, n_tests + 1))) cmd = ([python, theano_nose, '--with-id'] + - map(str, test_range) + + list(map(str, test_range)) + argv) subprocess_extra_args = dict(stdin=dummy_in.fileno()) if not display_batch_output: @@ -193,18 +193,18 @@ # otherwise this field may get erased. We use a set because it # seems like it is not systematically erased though, and we want # to avoid duplicates. - failed = failed.union(cPickle.load(open(noseids_file, 'rb')) + failed = failed.union(pickle.load(open(noseids_file, 'rb')) ['failed']) - print '%s%% done in %.3fs (failed: %s)' % ( - (test_range[-1] * 100) // n_tests, t1 - t0, len(failed)) + print('%s%% done in %.3fs (failed: %s)' % ( + (test_range[-1] * 100) // n_tests, t1 - t0, len(failed))) # Sort for cosmetic purpose only.
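# Illustrative sketch (not from the build log or the Theano sources): the
# run_tests_in_batch.py hunks above show two of the most common 2to3 rewrites,
# the removed cPickle module becoming pickle and print statements becoming
# print() calls. Hand-written code that has to keep running on both Python 2
# and Python 3 usually expresses the same thing like this:
from __future__ import print_function
try:
    import cPickle as pickle   # Python 2
except ImportError:
    import pickle              # Python 3
data = {'ids': [1, 2, 3]}
blob = pickle.dumps(data, protocol=2)   # protocol 2 is loadable by both versions
print('pickled %d bytes' % len(blob))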
failed = sorted(failed) if failed: # Re-run only failed tests - print """\ + print("""\ ################################ # RE-RUNNING FAILED TESTS ONLY # -################################""" +################################""") stdout.flush() stderr.flush() subprocess.call( @@ -218,17 +218,17 @@ stderr.flush() return 0 else: - print """\ + print("""\ #################### # ALL TESTS PASSED # -####################""" +####################""") # Time-profiling is called for else: - print """\ + print("""\ ######################################## # RUNNING TESTS IN TIME-PROFILING MODE # -########################################""" +########################################""") # finds first word of list l containing string s def getIndexOfFirst(l, s): @@ -262,8 +262,8 @@ f_nosort.write('TIME-PROFILING OF THEANO\'S NOSETESTS' ' (by sequential id)\n\n' + stamp + fields) f_nosort.flush() - for test_floor in xrange(1, n_tests + 1, batch_size): - for test_id in xrange(test_floor, min(test_floor + batch_size, + for test_floor in range(1, n_tests + 1, batch_size): + for test_id in range(test_floor, min(test_floor + batch_size, n_tests + 1)): # Print the test we will start in the raw log to help # debug tests that are too long. @@ -333,7 +333,7 @@ f_nosort.write(s_nosort) f_nosort.flush() - print '%s%% time-profiled' % ((test_id * 100) // n_tests) + print('%s%% time-profiled' % ((test_id * 100) // n_tests)) f_rawlog.close() # sorting tests according to running-time @@ -345,7 +345,7 @@ f_sort = open(path_sort, 'w') f_sort.write('TIME-PROFILING OF THEANO\'S NOSETESTS' ' (sorted by computation time)\n\n' + stamp + fields) - for i in xrange(len(prof_master_nosort)): + for i in range(len(prof_master_nosort)): s_sort = ((str(prof_master_sort[i][0]) + 's').ljust(10) + " " + prof_master_sort[i][1].ljust(7) + " " + prof_master_sort[i][2] + prof_master_sort[i][3] + --- ./theano/tests/record.py (original) +++ ./theano/tests/record.py (refactored) @@ -69,7 +69,7 @@ if record is None: record = Record(**kwargs) else: RefactoringTool: Refactored ./theano/tests/main.py RefactoringTool: No changes to ./theano/tests/diverse_tests.py RefactoringTool: Refactored ./theano/tests/disturb_mem.py RefactoringTool: No changes to ./theano/tests/__init__.py RefactoringTool: No changes to ./theano/tensor/xlogx.py RefactoringTool: Refactored ./theano/tensor/var.py RefactoringTool: Refactored ./theano/tensor/utils.py - assert len(kwargs.keys()) == 0 + assert len(list(kwargs.keys())) == 0 self.set_record(record) @@ -77,20 +77,20 @@ def handle_line(line, i, node, fn): try: self.record.handle_line(line) - except MismatchError, e: - print 'Got this MismatchError:' - print e - print 'while processing node i='+str(i)+':' - print 'str(node):',str(node) - print 'Symbolic inputs: ' + except MismatchError as e: + print('Got this MismatchError:') + print(e) + print('while processing node i='+str(i)+':') + print('str(node):',str(node)) + print('Symbolic inputs: ') for elem in node.inputs: - print theano.printing.min_informative_str(elem) - print 'str(output) of outputs: ' + print(theano.printing.min_informative_str(elem)) + print('str(output) of outputs: ') for elem in fn.outputs: assert isinstance(elem, list) elem, = elem - print str(elem) - print 'function name: '+node.fgraph.name + print(str(elem)) + print('function name: '+node.fgraph.name) raise MismatchError("Non-determinism detected by WrapLinker") def callback(i, node, fn): --- ./theano/tests/main.py (original) +++ ./theano/tests/main.py (refactored) @@ -40,9 +40,9 @@ nose =
import_nose() import theano - print "Theano version %s" % theano.__version__ + print("Theano version %s" % theano.__version__) theano_dir = os.path.dirname(theano.__file__) - print "theano is installed in %s" % theano_dir + print("theano is installed in %s" % theano_dir) super(TheanoNoseTester, self)._show_system_info() @@ -142,4 +142,4 @@ tests = unittest.TestLoader().loadTestsFromModule(module) unittest.TextTestRunner(verbosity=2).run(tests) else: - print "options: [--debug]" + print("options: [--debug]") --- ./theano/tests/disturb_mem.py (original) +++ ./theano/tests/disturb_mem.py (refactored) @@ -18,4 +18,4 @@ ms = int(ms) n = ms % 1000 m = ms // 1000 - l = [[0]*m for i in xrange(n)] + l = [[0]*m for i in range(n)] --- ./theano/tensor/var.py (original) +++ ./theano/tensor/var.py (refactored) @@ -56,7 +56,7 @@ rval._is_nonzero = False return rval - def __nonzero__(self): + def __bool__(self): # This is meant to prohibit stuff like a < b < c, which is internally # implemented as (a < b) and (b < c). The trouble with this is the # side-effect that checking for a non-NULL a by typing "if a: ..." @@ -415,7 +415,7 @@ def __iter__(self): try: - for i in xrange(theano.tensor.basic.get_vector_length(self)): + for i in range(theano.tensor.basic.get_vector_length(self)): yield self[i] except TypeError: # This prevents accidental iteration via builtin.sum(self) --- ./theano/tensor/utils.py (original) +++ ./theano/tensor/utils.py (refactored) @@ -42,7 +42,7 @@ hashable. """ - items = d.items() + items = list(d.items()) items.sort() first_part = [k for k, v in items] second_part = [] @@ -81,12 +81,12 @@ input_dims = [dimension for inp in fgraph.inputs for dimension in fgraph.shape_feature.shape_of[inp]] - output_dims = [dimension for shape in fgraph.shape_feature.shape_of.values() + output_dims = [dimension for shape in list(fgraph.shape_feature.shape_of.values()) for dimension in shape] compute_shapes = theano.function(input_dims, output_dims) - if any([i not in fgraph.inputs for i in input_shapes.keys()]): + if any([i not in fgraph.inputs for i in list(input_shapes.keys())]): RefactoringTool: Refactored ./theano/tensor/type_other.py RefactoringTool: Refactored ./theano/tensor/type.py RefactoringTool: Refactored ./theano/tensor/tests/test_xlogx.py RefactoringTool: No changes to ./theano/tensor/tests/test_utils.py RefactoringTool: Refactored ./theano/tensor/tests/test_subtensor.py raise ValueError( "input_shapes keys aren't in the fgraph.inputs. FunctionGraph()" " interface changed. Now by default, it clones the graph it receives."
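# Illustrative sketch (not from the Theano sources): the list(...) wrappers that
# 2to3 adds around d.items(), .keys(), .values(), zip() and map() in the hunks
# above exist because those calls return lazy views/iterators on Python 3. The
# fixer wraps them conservatively; the wrapper is only required where real list
# behaviour (sorting, indexing, repeated iteration) is needed.
d = {'a': 1, 'b': 2}
items = list(d.items())                 # needed here: sort() requires a real list
items.sort()
pairs = dict(zip(['x', 'y'], [0, 1]))   # wrapper not needed: dict() accepts any
                                        # iterable of key/value pairs
print(items, pairs)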
@@ -96,7 +96,7 @@ for dim in input_shapes[inp]] numeric_output_dims = compute_shapes(*numeric_input_dims) - sym_to_num_dict = dict(zip(output_dims, numeric_output_dims)) + sym_to_num_dict = dict(list(zip(output_dims, numeric_output_dims))) l = {} for var in fgraph.shape_feature.shape_of: --- ./theano/tensor/type_other.py (original) +++ ./theano/tensor/type_other.py (refactored) @@ -18,8 +18,8 @@ class MakeSlice(Op): def make_node(self, slc): return Apply(self, - map(as_int_none_variable, - [slc.start, slc.stop, slc.step]), + list(map(as_int_none_variable, + [slc.start, slc.stop, slc.step])), [slicetype()]) def perform(self, node, inp, out_): --- ./theano/tensor/type.py (original) +++ ./theano/tensor/type.py (refactored) @@ -208,7 +208,7 @@ def value_validity_msg(self, a): try: self.filter(a, strict=True) - except Exception, e: + except Exception as e: return str(e) return "value is valid" --- ./theano/tensor/tests/test_xlogx.py (original) +++ ./theano/tensor/tests/test_xlogx.py (refactored) @@ -4,7 +4,7 @@ import theano from theano.tensor import as_tensor_variable -import test_basic as TT +from . import test_basic as TT import random import numpy.random --- ./theano/tensor/tests/test_subtensor.py (original) +++ ./theano/tensor/tests/test_subtensor.py (refactored) @@ -1,4 +1,4 @@ -from itertools import izip + import logging import sys import unittest @@ -101,7 +101,7 @@ n = self.shared(numpy.ones((), dtype=self.dtype)) try: t = n[0] - except ValueError, e: + except ValueError as e: self.assertTrue(hasattr(e, 'subtensor_invalid')) return self.fail() @@ -122,7 +122,7 @@ try: try: self.eval_output_and_check(t) - except IndexError, e: + except IndexError as e: return self.fail() finally: @@ -132,7 +132,7 @@ n = self.shared(numpy.ones(3, dtype=self.dtype)) try: t = n[slice(0, slice(1, 2, None), None)] - except Exception, e: + except Exception as e: ### Relax constraint on the type of Exception, ### since this might be handled by AvancedSubtensor #if e[0] != Subtensor.e_indextype: @@ -173,7 +173,7 @@ n = self.shared(numpy.ones(1, dtype=self.dtype)) try: t = n[0, 0] - except ValueError, e: + except ValueError as e: self.assertTrue(hasattr(e, 'subtensor_invalid')) return self.fail() @@ -299,7 +299,7 @@ def test_long(self): n = self.shared(numpy.arange(12, dtype=self.dtype).reshape((4, 3))) - t = n[1L:4L:2L, 1L] + t = n[1:4:2, 1] self.assertTrue(isinstance(t.owner.op, Subtensor)) tval = self.eval_output_and_check(t) self.assertTrue(tval.shape == (2,)) @@ -309,7 +309,7 @@ # Currently, we cast Python longs to int64 when used for indexing. # This test checks that using a long that does not fit raises an error. 
n = self.shared(numpy.arange(12, dtype=self.dtype).reshape((4, 3))) - self.assertRaises(Exception, lambda: n[:(2L ** 63)]) + self.assertRaises(Exception, lambda: n[:(2 ** 63)]) def test_newaxis(self): """ @@ -831,7 +831,7 @@ data_num_init = numpy.arange(data_size, dtype=self.dtype) data_num_init = data_num_init.reshape(data_shape) inc_shapes = [data_shape[i:] - for i in xrange(0, len(data_shape) + 1)] RefactoringTool: No changes to ./theano/tensor/tests/test_sort.py RefactoringTool: No changes to ./theano/tensor/tests/test_sharedvar.py RefactoringTool: Refactored ./theano/tensor/tests/test_shared_randomstreams.py RefactoringTool: Refactored ./theano/tensor/tests/test_raw_random.py + for i in range(0, len(data_shape) + 1)] for inc_shape in inc_shapes: inc_n_dims = len(inc_shape) # We copy the numeric value to be 100% sure there is no @@ -919,7 +919,7 @@ f_outs = f(*all_inputs_num) assert len(f_outs) == len(all_outputs_num) - for f_out, output_num in izip(f_outs, all_outputs_num): + for f_out, output_num in zip(f_outs, all_outputs_num): # NB: if this assert fails, it will probably be easier to debug if # you enable the debug code above. assert numpy.allclose(f_out, output_num) --- ./theano/tensor/tests/test_shared_randomstreams.py (original) +++ ./theano/tensor/tests/test_shared_randomstreams.py (refactored) @@ -51,10 +51,10 @@ #print numpy_val0 assert numpy.allclose(fn_val0, numpy_val0) - print fn_val0 - print numpy_val0 - print fn_val1 - print numpy_val1 + print(fn_val0) + print(numpy_val0) + print(fn_val1) + print(numpy_val1) assert numpy.allclose(fn_val1, numpy_val1) def test_seed_fn(self): @@ -257,9 +257,9 @@ in_mval = val_rng.uniform(-2, 2, size=(20,5)) fn_mval0 = f(in_mval) fn_mval1 = f(in_mval) - print in_mval[0] - print fn_mval0[0] - print fn_mval1[0] + print(in_mval[0]) + print(fn_mval0[0]) + print(fn_mval1[0]) assert not numpy.all(in_mval == fn_mval0) assert not numpy.all(in_mval == fn_mval1) assert not numpy.all(fn_mval0 == fn_mval1) @@ -287,9 +287,9 @@ numpy_vval = in_vval.copy() vrng = numpy.random.RandomState(int(rng_seed)) vrng.shuffle(numpy_vval) - print in_vval - print fn_vval - print numpy_vval + print(in_vval) + print(fn_vval) + print(numpy_vval) assert numpy.all(numpy_vval == fn_vval) # Trying to shuffle a vector with function that should shuffle @@ -495,15 +495,15 @@ # Arguments of size (3,) val0 = f(low_val, high_val) numpy_val0 = numpy_rng.uniform(low=low_val, high=high_val) - print 'THEANO', val0 - print 'NUMPY', numpy_val0 + print('THEANO', val0) + print('NUMPY', numpy_val0) assert numpy.all(val0 == numpy_val0) # arguments of size (2,) val1 = f(low_val[:-1], high_val[:-1]) numpy_val1 = numpy_rng.uniform(low=low_val[:-1], high=high_val[:-1]) - print 'THEANO', val1 - print 'NUMPY', numpy_val1 + print('THEANO', val1) + print('NUMPY', numpy_val1) assert numpy.all(val1 == numpy_val1) # Specifying the size explicitly --- ./theano/tensor/tests/test_raw_random.py (original) +++ ./theano/tensor/tests/test_raw_random.py (refactored) @@ -306,8 +306,8 @@ self.assertRaises(ValueError, fm11) self.assertRaises(ValueError, fm12) u01, u02 = f0() - print u01 - print u02 + print(u01) + print(u02) self.assertTrue(numpy.allclose(u01, u02[0])) def test_uniform(self): @@ -328,10 +328,10 @@ val1 = f() numpy_val0 = numpy_rng.uniform(-2.0, 2.0, size=(4,)) numpy_val1 = numpy_rng.uniform(-2.0, 2.0, size=(4,)) - print val0 - print numpy_val0 - print val1 - print numpy_val1 + print(val0) + print(numpy_val0) + print(val1) + print(numpy_val1) self.assertTrue(numpy.allclose(val0, numpy_val0))
self.assertTrue(numpy.allclose(val1, numpy_val1)) @@ -355,10 +355,10 @@ val1 = f() numpy_val0 = numpy_rng.binomial(5, 0.8, size=(7, 12)) numpy_val1 = numpy_rng.binomial(5, 0.8, size=(7, 12)) - print val0 - print numpy_val0 - print val1 - print numpy_val1 + print(val0) + print(numpy_val0) + print(val1) + print(numpy_val1) self.assertTrue(numpy.all(val0 == numpy_val0)) self.assertTrue(numpy.all(val1 == numpy_val1)) @@ -380,1RefactoringTool: Refactored ./theano/tensor/tests/test_randomstreams.py 0 +380,10 @@ val1 = f() numpy_val0 = numpy_rng.normal(4.0, 2.0, size=(2, 3)) numpy_val1 = numpy_rng.normal(4.0, 2.0, size=(2, 3)) - print val0 - print numpy_val0 - print val1 - print numpy_val1 + print(val0) + print(numpy_val0) + print(val1) + print(numpy_val1) self.assertTrue(numpy.allclose(val0, numpy_val0)) self.assertTrue(numpy.allclose(val1, numpy_val1)) @@ -407,10 +407,10 @@ val1 = f() numpy_val0 = numpy_rng.random_integers(-3, 16, size=(11, 8)) numpy_val1 = numpy_rng.random_integers(-3, 16, size=(11, 8)) - print val0 - print numpy_val0 - print val1 - print numpy_val1 + print(val0) + print(numpy_val0) + print(val1) + print(numpy_val1) self.assertTrue(numpy.allclose(val0, numpy_val0)) self.assertTrue(numpy.allclose(val1, numpy_val1)) @@ -443,10 +443,10 @@ for i in range(7)]) numpy_val1 = numpy.asarray([numpy_rng.permutation(8) for i in range(7)]) - print val0 - print numpy_val0 - print val1 - print numpy_val1 + print(val0) + print(numpy_val0) + print(val1) + print(numpy_val1) self.assertTrue(numpy.all(val0 == numpy_val0)) self.assertTrue(numpy.all(val1 == numpy_val1)) @@ -498,10 +498,10 @@ val1 = f() numpy_val0 = numpy_rng.choice(10, (11, 8), True, None) numpy_val1 = numpy_rng.choice(10, (11, 8), True, None) - print val0 - print numpy_val0 - print val1 - print numpy_val1 + print(val0) + print(numpy_val0) + print(val1) + print(numpy_val1) self.assertTrue(numpy.allclose(val0, numpy_val0)) self.assertTrue(numpy.allclose(val1, numpy_val1)) @@ -510,7 +510,7 @@ results as numpy.""" rng_R = random_state_type() post_r, out = permutation(rng_R, size=(9,), n=6) - print 'OUT NDIM', out.ndim + print('OUT NDIM', out.ndim) f = compile.function( [compile.In(rng_R, value=numpy.random.RandomState(utt.fetch_seed()), @@ -527,10 +527,10 @@ for i in range(9)]) numpy_val1 = numpy.asarray([numpy_rng.permutation(6) for i in range(9)]) - print val0 - print numpy_val0 - print val1 - print numpy_val1 + print(val0) + print(numpy_val0) + print(val1) + print(numpy_val1) self.assertTrue(numpy.all(val0 == numpy_val0)) self.assertTrue(numpy.all(val1 == numpy_val1)) @@ -552,10 +552,10 @@ val1, = f() numpy_val0 = numpy_rng.multinomial(6, [0.2] * 5, (7, 3)) numpy_val1 = numpy_rng.multinomial(6, [0.2] * 5, (7, 3)) - print val0 - print numpy_val0 - print val1 - print numpy_val1 + print(val0) + print(numpy_val0) + print(val1) + print(numpy_val1) self.assertTrue(numpy.all(val0 == numpy_val0)) self.assertTrue(numpy.all(val1 == numpy_val1)) --- ./theano/tensor/tests/test_randomstreams.py (original) +++ ./theano/tensor/tests/test_randomstreams.py (refactored) @@ -122,8 +122,8 @@ rng = numpy.random.RandomState(realseed) made.random[out.rng] = numpy.random.RandomState(realseed) - print made.fn() - print rng.uniform(size=(2,2)) + print(made.fn()) + print(rng.uniform(size=(2,2))) fn_val0 = made.fn() fn_val1 = made.fn() @@ -186,16 +186,16 @@ made.random.initialize() fn_val0 = made.fn() fn_val1 = made.fn() - print fn_val0 - print fn_val1 + print(fn_val0) + print(fn_val1) rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30) rng = 
numpy.random.RandomState(int(rng_seed)) #int() is for 32bit RefactoringTool: No changes to ./theano/tensor/tests/test_opt_uncanonicalize.py RefactoringTool: Refactored ./theano/tensor/tests/test_opt.py numpy_val0 = rng.uniform(-1, 1, size=(2,2)) numpy_val1 = rng.uniform(-1, 1, size=(2,2)) - print numpy_val0 - print numpy_val1 + print(numpy_val0) + print(numpy_val1) assert numpy.allclose(fn_val0, numpy_val0) assert numpy.allclose(fn_val1, numpy_val1) @@ -301,9 +301,9 @@ in_mval = val_rng.uniform(-2, 2, size=(20,5)) fn_mval0 = mmade.f(in_mval) fn_mval1 = mmade.f(in_mval) - print in_mval[0] - print fn_mval0[0] - print fn_mval1[0] + print(in_mval[0]) + print(fn_mval0[0]) + print(fn_mval1[0]) assert not numpy.all(in_mval == fn_mval0) assert not numpy.all(in_mval == fn_mval1) assert not numpy.all(fn_mval0 == fn_mval1) @@ -334,9 +334,9 @@ numpy_vval = in_vval.copy() vrng = numpy.random.RandomState(int(rng_seed)) vrng.shuffle(numpy_vval) - print in_vval - print fn_vval - print numpy_vval + print(in_vval) + print(fn_vval) + print(numpy_vval) assert numpy.all(numpy_vval == fn_vval) # Trying to shuffle a vector with function that should shuffle --- ./theano/tensor/tests/test_opt.py (original) +++ ./theano/tensor/tests/test_opt.py (refactored) @@ -213,9 +213,9 @@ # e = (x / x) * (y / y) e = (-1 * x) / y / (-2 * z) g = FunctionGraph([x, y, z, a, b, c, d], [e]) - print pprint(g.outputs[0]) + print(pprint(g.outputs[0])) mul_canonizer.optimize(g) - print pprint(g.outputs[0]) + print(pprint(g.outputs[0])) def test_elemwise_multiple_inputs_optimisation(self): """verify that the Canonizer merge sequential Elemwise({mul,add}) part 1 @@ -491,9 +491,9 @@ assert(out_dtype == out.dtype) assert numpy.allclose(out, val_inputs[1]) topo = f.maker.fgraph.toposort() - print "ID TOPO", id, topo, sym_inputs - for r, t in f.maker.fgraph.shape_feature.shape_of.items(): - print ' ', r, t + print("ID TOPO", id, topo, sym_inputs) + for r, t in list(f.maker.fgraph.shape_feature.shape_of.items()): + print(' ', r, t) if topo and not(len(topo)==1 and topo[0].op==deep_copy_op): for node in topo[:-1]: assert isinstance(node.op, Shape_i) @@ -517,10 +517,10 @@ out = f(*val_inputs) assert numpy.allclose(out, (1 / val_inputs[1])) topo = f.maker.fgraph.toposort() - print topo + print(topo) elem = [t for t in topo if isinstance(t.op, T.Elemwise)] assert len(elem) == nb_elemwise - assert isinstance(elem[0].op, (T.Elemwise, )) + assert isinstance(elem[0].op, T.Elemwise) assert isinstance(elem[0].op.scalar_op, ( theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv)) assert(out_dtype == out.dtype) @@ -544,7 +544,7 @@ assert numpy.allclose(out, (val_inputs[0] / val_inputs[3])) topo = f.maker.fgraph.toposort() assert len(topo) == 1 - assert isinstance(topo[0].op, (T.Elemwise, )) + assert isinstance(topo[0].op, T.Elemwise) assert isinstance(topo[0].op.scalar_op, theano.scalar.basic.TrueDiv) assert len(topo[0].inputs) == 2 @@ -569,11 +569,11 @@ val_inputs[0] / val_inputs[1])) topo = f.maker.fgraph.toposort() assert len(topo) == 2 - assert isinstance(topo[0].op, (T.Elemwise, )) + assert isinstance(topo[0].op, T.Elemwise) assert isinstance(topo[0].op.scalar_op, theano.scalar.basic.Mul) assert len(topo[0].inputs) == 2 - assert isinstance(topo[1].op, (T.Elemwise, )) + assert isinstance(topo[1].op, T.Elemwise) assert isinstance(topo[1].op.scalar_op, theano.scalar.basic.TrueDiv) assert len(topo[1].inputs) == 2 @@ -656,8 +656,8 @@ "local_elemwise_fusion") f = theano.function([x], [(4 * x) / abs(2 * x)], mode=mode) - print f.maker.fgraph.toposort() -
print + print(f.maker.fgraph.toposort()) + print() f(.1) f(-1) #some stabilization optimization make the output be finite instead of nan @@ -669,8 +669,8 @@ assert f.maker.fgraph.toposort()[0].op == T.sgn f = theano.function([x], [(4 * x) / abs(x / 2)], mode=mode) - print f.maker.fgraph.toposort() - print + print(f.maker.fgraph.toposort()) + print() f(.1) f(-1) #some stabilization optimization make the output be finite instead of nan @@ -717,9 +717,9 @@ assert numpy.allclose(out, val_inputs[0] / val_inputs[1] / val_inputs[2]) topo = f.maker.fgraph.toposort() - print topo + print(topo) assert len(topo) == 2 - assert isinstance(topo[0].op, (T.Elemwise, )) + assert isinstance(topo[0].op, T.Elemwise) assert isinstance(topo[0].op.scalar_op, theano.scalar.basic.Inv) assert len(topo[0].inputs) == 1 @@ -736,9 +736,9 @@ assert numpy.allclose(out, val_inputs[0] / ( val_inputs[1] / val_inputs[2])) topo = f.maker.fgraph.toposort() - print topo + print(topo) assert len(topo) == 2 - assert isinstance(topo[0].op, (T.Elemwise, )) + assert isinstance(topo[0].op, T.Elemwise) assert isinstance(topo[0].op.scalar_op, theano.scalar.basic.Inv) assert len(topo[0].inputs) == 1 @@ -1050,9 +1050,9 @@ out_dtype = out_dtype[config.cast_policy] if (gpu and (out_dtype != 'float32' or any(i.dtype != 'float32' for i in g.owner.inputs))): - print "Skip test %d as the gpu code currently supports only float32" % id + print("Skip test %d as the gpu code currently supports only float32" % id) continue - print "new cases", id + print("new cases", id) if shared_fn is None: assert gpu is False @@ -1080,9 +1080,9 @@ atol = 1e-6 if not numpy.allclose(out, answer * nb_repeat, atol=atol): fail1.append(id) - print val_inputs - print out - print answer * nb_repeat + print(val_inputs) + print(out) + print(answer * nb_repeat) #assert 0 topo = f.maker.fgraph.toposort() if gpu: @@ -1114,7 +1114,7 @@ fail4.append((id, out_dtype, out.dtype)) failed = len(fail1 + fail2 + fail3 + fail4) - print "Executed", len(cases), "cases", "failed", failed + print("Executed", len(cases), "cases", "failed", failed) if failed > 0: raise Exception("Failed %d cases" % failed, fail1, fail2, fail3, fail4) @@ -1192,19 +1192,19 @@ #Follow up. Clinker do the same... second cause? 
mode2 = copy.copy(compile.get_default_mode()) mode2._optimizer = mode2._optimizer.excluding('local_elemwise_fusion') - print "test with linker", str(mode1.linker) + print("test with linker", str(mode1.linker)) times1 = self.do(mode1, shared_fn, shp, gpu=gpu, nb_repeat= nb_repeat, assert_len_topo=False, slice=s) times2 = self.do(mode2, shared_fn, shp, gpu=gpu, nb_repeat= nb_repeat, assert_len_topo=False, slice=s) - print "times1 with local_elemwise_fusion" - print times1, times1.min(), times1.max(), times1.sum() - print "times2 without local_elemwise_fusion" - print times2, times2.min(), times2.max(), times2.sum() + print("times1 with local_elemwise_fusion") + print(times1, times1.min(), times1.max(), times1.sum()) + print("times2 without local_elemwise_fusion") + print(times2, times2.min(), times2.max(), times2.sum()) d = times2 / times1 - print "times2/times1" - print d - print "min", d.min(), "argmin", d.argmin(), "max", d.max(), \ - "mean", d.mean(), "std", d.std() + print("times2/times1") + print(d) + print("min", d.min(), "argmin", d.argmin(), "max", d.max(), \ + "mean", d.mean(), "std", d.std()) def test_fusion_inplace(self): mode = copy.copy(compile.mode.get_default_mode()) @@ -1232,8 +1232,8 @@ linker = gof.OpWiseCLinker mode = compile.Mode(linker(), copy.copy(compile.mode.OPT_FAST_RUN)) mode = compile.ProfileMode() - print "time", self.do(mode, shared, shp=(1000, 1000), gpu= - False, assert_len_topo=False, slice=s, nb_repeat=100) + print("time", self.do(mode, shared, shp=(1000, 1000), gpu= + False, assert_len_topo=False, slice=s, nb_repeat=100)) def tes_memory_leak(self, mode=compile.mode.Mode('c', 'merge'), shared_fn=shared, shp=(3000,3000), gpu=False, nb_repeat=30, assert_len_topo=True, slice=None): @@ -1283,15 +1283,14 @@ gc.collect() gc.collect() nd = objgraph.typestats() - print 'key, old val, new val, diff' - for key in set(d.keys() + nd.keys()): - if d.has_key(key) and nd.has_key(key) and nd[key]!=d[key]: - print key, d.get(key), nd.get(key), - if d.has_key( - key) and nd.has_key(key): - print nd[key] - d[key] + print('key, old val, new val, diff') + for key in set(list(d.keys()) + list(nd.keys())): + if key in d and key in nd and nd[key]!=d[key]: + print(key, d.get(key), nd.get(key), end=' ') + if key in d and key in nd: + print(nd[key] - d[key]) else: - print None + print(None) gc.collect() gc.collect() gc.collect() @@ -1362,7 +1361,9 @@ float %(nodename)s_timesn(float x) { return x * %(n)s; } """ % locals() - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme, xxx_todo_changeme1, sub): + (x, ) = xxx_todo_changeme + (z, ) = xxx_todo_changeme1 return "%(z)s = %(name)s_timesn(%(x)s);" % locals() @@ -1436,7 +1437,7 @@ # check trickier cases (and use different dtype) y = fmatrix() f = function([x, y], T.log(tensor.fill(y, 1) + (x)), mode=m) - print f.maker.fgraph.toposort() + print(f.maker.fgraph.toposort()) # the first three ops are Shape_i, Shape_i, and Dimshuffle theano.printing.debugprint(f) assert [node.op for node in f.maker.fgraph.toposort()][3:] \ @@ -1486,12 +1487,12 @@ theano.printing.debugprint(f) - print f([10000], [10000]) # causes overflow if handled incorrectly + print(f([10000], [10000])) # causes overflow if handled incorrectly assert numpy.isfinite(f([10000], [10000])) assert numpy.allclose(f([10000], [10000]), 10000 + numpy.log1p(1)) #test that it give the same result when it don't overflow - print f([10], [10]) # don't causes overflow + print(f([10], [10])) # don't causes overflow assert numpy.allclose(f([10], 
[10]), 10 + numpy.log1p(1)) # test that it also works with more than two args, (this currently fails) @@ -1502,7 +1503,7 @@ theano.printing.debugprint(f) try: - print f([10000], [10000]) # causes overflow if handled incorrectly + print(f([10000], [10000])) # causes overflow if handled incorrectly assert numpy.allclose(f([10000], [10000]), 20000) except AssertionError: raise KnownFailureTest(('log(add(exp)) is not stabilized when adding ' @@ -1885,7 +1886,7 @@ for idx in range(-x_s[0], x_s[0]): f(x_val, idx) # let debugmode test something - for idx in (range(x_s[0], 9) + range(-9, -x_s[0])): + for idx in (list(range(x_s[0], 9)) + list(range(-9, -x_s[0]))): self.assertRaises(IndexError, f, x_val, idx) self.assertRaises(IndexError, g, x_val, idx) @@ -2004,10 +2005,10 @@ #print topo[-1].op assert isinstance(topo[-1].op, DeepCopyOp) - b1r = self.rng.permutation(range(-8, 8))[:2] - e1r = self.rng.permutation(range(-8, 8))[:2] - b2r = self.rng.permutation(range(-8, 8))[:2] - e2r = self.rng.permutation(range(-8, 8))[:2] + b1r = self.rng.permutation(list(range(-8, 8)))[:2] + e1r = self.rng.permutation(list(range(-8, 8)))[:2] + b2r = self.rng.permutation(list(range(-8, 8)))[:2] + e2r = self.rng.permutation(list(range(-8, 8)))[:2] s1r = self.rng.permutation([-7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7])[:2] @@ -2087,9 +2088,9 @@ #print topo[-1].op assert isinstance(topo[-1].op, DeepCopyOp) - b_r = self.rng.permutation(range(-4, 4))[:3] - e_r = self.rng.permutation(range(-4, 4))[:3] - i_r = self.rng.permutation(range(-4, 4))[:3] + b_r = self.rng.permutation(list(range(-4, 4)))[:3] + e_r = self.rng.permutation(list(range(-4, 4)))[:3] + i_r = self.rng.permutation(list(range(-4, 4)))[:3] s_r = self.rng.permutation([-3, -2, -1, 1, 2, 3])[:3] @@ -2116,8 +2117,8 @@ n_ok += 1 f(x_val, b_v, e_v, s_v, i_v) - print 'shape: %s' % (x_s,) - print '%% OK: %f' % (float(n_ok) * 100 / (n_ok + n_index_err)) + print('shape: %s' % (x_s,)) + print('%% OK: %f' % (float(n_ok) * 100 / (n_ok + n_index_err))) def test_none_slice(self): # Test case of two slices, var[b1:e1:s1][b2:e2:s2] @@ -2135,10 +2136,10 @@ none_positions = numpy.ndindex(2, 2, 2, 2, 2, 2) # Ranges to be used when not None - b1r = self.rng.permutation(range(-4, 4))[:] - e1r = self.rng.permutation(range(-4, 4))[:] - b2r = self.rng.permutation(range(-4, 4))[:] - e2r = self.rng.permutation(range(-4, 4))[:] + b1r = self.rng.permutation(list(range(-4, 4)))[:] + e1r = self.rng.permutation(list(range(-4, 4)))[:] + b2r = self.rng.permutation(list(range(-4, 4)))[:] + e2r = self.rng.permutation(list(range(-4, 4)))[:] s1r = self.rng.permutation([-4, -3, -2, -1, 1, 2, 3, 4])[:] s2r = self.rng.permutation([-4, -3, -2, -1, 1, 2, 3, 4])[:] @@ -2194,9 +2195,9 @@ none_positions = numpy.ndindex(2, 2, 2, 1) # Ranges to be used when not None - b_r = self.rng.permutation(range(-4, 4))[:] - e_r = self.rng.permutation(range(-4, 4))[:] - i_r = self.rng.permutation(range(-4, 4))[:] + b_r = self.rng.permutation(list(range(-4, 4)))[:] + e_r = self.rng.permutation(list(range(-4, 4)))[:] + i_r = self.rng.permutation(list(range(-4, 4)))[:] s_r = self.rng.permutation([-4, -3, -2, -1, 1, 2, 3, 4])[:] scalar_vars = [b, e, s, i] @@ -2537,9 +2538,9 @@ c01b = T.set_subtensor(wide_infinity[:, 0:img_shp, 0:img_shp, :], c01b) - for row_within_pool in xrange(pool_shp): + for row_within_pool in range(pool_shp): row_stop = last_pool_r + row_within_pool + 1 - for col_within_pool in xrange(pool_shp): + for col_within_pool in range(pool_shp): col_stop = last_pool_c + col_within_pool + 1 cur = 
c01b[:, row_within_pool:row_stop:pool_stride, col_within_pool:col_stop:pool_stride, :] @@ -2646,7 +2647,7 @@ mode = theano.compile.get_default_mode().excluding('ShapeOpt') f = theano.function([X], expr, mode=mode) - print f([[1, 2], [2, 3]]) + print(f([[1, 2], [2, 3]])) def test_no_cycle(self): # Optimizing this graph resulted in a cycle, see gh-1549 @@ -2682,7 +2683,7 @@ def test0(self): x = T.scalar() y = T.scalar() - f = theano.function([x, y], theano.tensor.opt.assert_(x, T.eq(x, y))) + f = theano.function([x, y], theano.tensor.opt.assertTrue(x, T.eq(x, y))) f(1, 1) self.assertRaises(AssertionError, f, 1, 0) @@ -2694,7 +2695,7 @@ mode = compile.mode.get_mode(mode) x = T.scalar() - f = theano.function([x], theano.tensor.opt.assert_(x, 1), mode=mode) + f = theano.function([x], theano.tensor.opt.assertTrue(x, 1), mode=mode) assert f(1) == 1 assert f(5) == 5 topo = f.maker.fgraph.toposort() @@ -2710,7 +2711,7 @@ x = T.scalar() y = T.scalar() - f = theano.function([x, y], theano.tensor.opt.assert_(x, y, + f = theano.function([x, y], theano.tensor.opt.assertTrue(x, y, 1), mode=mode) assert f(1, 1) == 1 assert f(5, 1) == 5 @@ -2728,7 +2729,7 @@ x = T.scalar() y = T.scalar() - f = theano.function([x, y], theano.tensor.opt.assert_(x, y, + f = theano.function([x, y], theano.tensor.opt.assertTrue(x, y, 0), mode=mode) self.assertRaises(AssertionError, f, 1, 0) topo = f.maker.fgraph.toposort() @@ -2742,14 +2743,14 @@ bdscal = dscalar() adscal_val = numpy.random.rand() bdscal_val = numpy.random.rand() + 1 - out = theano.tensor.opt.assert_(adscal, bdscal) + out = theano.tensor.opt.assertTrue(adscal, bdscal) self._compile_and_check([adscal, bdscal], [out], [adscal_val, bdscal_val], Assert) admat = dmatrix() admat_val = numpy.random.rand(3, 4) adscal_val += 1 - out = theano.tensor.opt.assert_(admat, adscal, bdscal) + out = theano.tensor.opt.assertTrue(admat, adscal, bdscal) self._compile_and_check([admat, adscal, bdscal], [out], [admat_val, adscal_val, bdscal_val], Assert) @@ -2766,34 +2767,34 @@ f = function([v], v * 1, mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] - print nodes + print(nodes) nodes == [deep_copy_op] f = function([v], v * 0, mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] - print nodes + print(nodes) assert nodes == [Shape_i(0), T.alloc] f = function([v], v * (-1), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] - print nodes + print(nodes) assert nodes == [T.neg] f = function([v, m], v * 1 * (-m), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] - print nodes + print(nodes) theano.printing.debugprint(f) assert nodes == [T.mul, inplace.neg_inplace] f = function([v, m], v * 0 * (-m), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] - print nodes + print(nodes) theano.printing.debugprint(f) assert nodes == [Shape_i(0), T.alloc] f = function([v, m], v * (-1) * (-m), mode=mode) nodes = [node.op for node in f.maker.fgraph.toposort()] - print nodes + print(nodes) theano.printing.debugprint(f) assert nodes == [T.mul] @@ -2812,9 +2813,9 @@ t2 = time.time() f2(val) t3 = time.time() - print i, t2 - t1, t3 - t2, t2 - t1 < t3 - t2 + print(i, t2 - t1, t3 - t2, t2 - t1 < t3 - t2) if not t2 - t1 < t3 - t2: - print "WARNING WE ARE SLOWER" + print("WARNING WE ARE SLOWER") for i in range(-3, -1500, -1): f1 = function([v], v ** i, mode=mode) f2 = function([v], v ** i, mode=mode_without_pow_opt) @@ -2824,9 +2825,9 @@ t2 = time.time() f2(val) t3 = time.time() - print i, t2 - t1, t3 - t2, t2 - t1 < t3 - t2 + 
print(i, t2 - t1, t3 - t2, t2 - t1 < t3 - t2) if not t2 - t1 < t3 - t2: - print "WARNING WE ARE SLOWER" + print("WARNING WE ARE SLOWER") def test_local_pow_specialize(): @@ -2965,7 +2966,7 @@ f2 = theano.function([x], T.eq(x, x), mode=self.mode) assert numpy.all(f2(vx) == numpy.ones((5, 4))) topo2 = f2.maker.fgraph.toposort() - print topo2 + print(topo2) #Shape_i{1}(), Shape_i{0}(), Alloc([[1]], Shape_i{0}.0, Shape_i{1}.0 assert len(topo2) == 3 assert isinstance(topo2[-1].op, T.Alloc) @@ -2984,7 +2985,7 @@ f2 = theano.function([x], T.neq(x, x), mode=self.mode) assert numpy.all(f2(vx) == numpy.zeros((5, 4))) topo2 = f2.maker.fgraph.toposort() - print topo2 + print(topo2) assert len(topo2) == 3 assert isinstance(topo2[-1].op, T.Alloc) @@ -3001,7 +3002,7 @@ f2 = theano.function([x, y], T.mul(x, y), mode=self.mode) assert numpy.all(f2(vx, vy) == vx * vy) topo2 = f2.maker.fgraph.toposort() - print topo2 + print(topo2) assert len(topo2) == 1 assert isinstance(topo2[0].op, T.Elemwise) assert isinstance(topo2[0].op.scalar_op, theano.scalar.Mul) @@ -3019,7 +3020,7 @@ f2 = theano.function([x, y], T.add(x, y), mode=self.mode) assert numpy.all(f2(vx, vy) == vx + vy) topo2 = f2.maker.fgraph.toposort() - print topo2 + print(topo2) assert len(topo2) == 1 assert isinstance(topo2[0].op, T.Elemwise) assert isinstance(topo2[0].op.scalar_op, theano.scalar.Add) @@ -3149,20 +3150,20 @@ x = T.vector() f = theano.function([x], 1 + T.erf(x), mode=self.mode) - print f.maker.fgraph.toposort() + print(f.maker.fgraph.toposort()) assert [n.op for n in f.maker.fgraph.toposort()] == [T.mul, T. erfc], f.maker.fgraph.toposort() f(val) f = theano.function([x], T.erf(x) + 1, mode=self.mode) - print f.maker.fgraph.toposort() + print(f.maker.fgraph.toposort()) assert [n.op for n in f.maker.fgraph.toposort()] == [T.mul, T. 
erfc], f.maker.fgraph.toposort() f(val) f = theano.function([x], T.erf(x) + 2, mode=self.mode) topo = f.maker.fgraph.toposort() - print topo + print(topo) assert len(topo) == 2 assert topo[0].op == T.erf assert isinstance(topo[1].op, T.Elemwise) @@ -3175,32 +3176,32 @@ x = T.vector() f = theano.function([x], 1 - T.erf(x), mode=self.mode) - print f.maker.fgraph.toposort() + print(f.maker.fgraph.toposort()) assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc]\ , f.maker.fgraph.toposort() - print f(val) + print(f(val)) f = theano.function([x], 1 + (-T.erf(x)), mode=self.mode) - print f.maker.fgraph.toposort() + print(f.maker.fgraph.toposort()) assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc]\ , f.maker.fgraph.toposort() - print f(val) + print(f(val)) f = theano.function([x], (-T.erf(x)) + 1, mode=self.mode) - print f.maker.fgraph.toposort() + print(f.maker.fgraph.toposort()) assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc]\ , f.maker.fgraph.toposort() - print f(val) + print(f(val)) f = theano.function([x], 2 - T.erf(x), mode=self.mode) topo = f.maker.fgraph.toposort() - print topo + print(topo) assert len(topo) == 2, f.maker.fgraph.toposort() assert topo[0].op == T.erf, f.maker.fgraph.toposort() assert isinstance(topo[1].op, T.Elemwise), f.maker.fgraph.toposort() assert isinstance(topo[1].op.scalar_op, scal.Add)\ or isinstance(topo[1].op.scalar_op,scal.Sub), f.maker.fgraph.toposort() - print f(val) + print(f(val)) def test_local_erf_minus_one(self): val = numpy.asarray([-30, -3, -2, -1, 0, 1, 2, 3, 30], @@ -3208,29 +3209,29 @@ x = T.vector() f = theano.function([x], T.erf(x) - 1, mode=self.mode) - print f.maker.fgraph.toposort() + print(f.maker.fgraph.toposort()) assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc, T.mul] - print f(val) + print(f(val)) f = theano.function([x], T.erf(x) + (-1), mode=self.mode) - print f.maker.fgraph.toposort() + print(f.maker.fgraph.toposort()) assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc, T.mul] - print f(val) + print(f(val)) f = theano.function([x], -1 + T.erf(x), mode=self.mode) - print f.maker.fgraph.toposort() + print(f.maker.fgraph.toposort()) assert [n.op for n in f.maker.fgraph.toposort()] == [T.erfc, T.mul] - print f(val) + print(f(val)) f = theano.function([x], T.erf(x) - 2, mode=self.mode) topo = f.maker.fgraph.toposort() - print topo + print(topo) assert len(topo) == 2 assert topo[0].op == T.erf assert isinstance(topo[1].op, T.Elemwise) assert isinstance(topo[1].op.scalar_op, scal.Add)\ or isinstance(topo[1].op.scalar_op, scal.Sub) - print f(val) + print(f(val)) class T_local_erfc(unittest.TestCase): RefactoringTool: Refactored ./theano/tensor/tests/test_naacl09.py @@ -3253,13 +3254,13 @@ theano.printing.debugprint(f) assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf]\ , f.maker.fgraph.toposort() - print f(val) + print(f(val)) f = theano.function([x], (-T.erfc(x)) + 1, mode=self.mode) theano.printing.debugprint(f) assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf]\ , f.maker.fgraph.toposort() - print f(val) + print(f(val)) f = theano.function([x], 2 - T.erfc(x), mode=self.mode) topo = f.maker.fgraph.toposort() @@ -3269,7 +3270,7 @@ assert isinstance(topo[1].op, T.Elemwise), f.maker.fgraph.toposort() assert isinstance(topo[1].op.scalar_op, scal.Sub)\ , f.maker.fgraph.toposort() - print f(val) + print(f(val)) def test_local_erf_neg_minus_one(self): """ test opt: (-1)+erfc(-x)=>erf(x)""" @@ -3281,19 +3282,19 @@ theano.printing.debugprint(f) assert [n.op for n in
f.maker.fgraph.toposort()] == [T.erf]\ , f.maker.fgraph.toposort() - print f(val) + print(f(val)) f = theano.function([x], T.erfc(-x) - 1, mode=self.mode) theano.printing.debugprint(f) assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf]\ , f.maker.fgraph.toposort() - print f(val) + print(f(val)) f = theano.function([x], T.erfc(-x) + (-1), mode=self.mode) theano.printing.debugprint(f) assert [n.op for n in f.maker.fgraph.toposort()] == [T.erf]\ , f.maker.fgraph.toposort() - print f(val) + print(f(val)) def test_local_log_erfc(self): val = [-30, -27, -26, -11, -10, -3, -2, -1, 0, 1, 2, 3, 10, @@ -3418,14 +3419,14 @@ f1 = theano.function([x], T.log(T.erfc(x)), mode=mode. excluding("local_log_erfc")) f2 = theano.function([x], T.log(T.erfc(x)), mode=mode) - print f1.maker.fgraph.toposort() - print f2.maker.fgraph.toposort() + print(f1.maker.fgraph.toposort()) + print(f2.maker.fgraph.toposort()) t0 = time.time() f1(val) t1 = time.time() f2(val) t2 = time.time() - print t1 - t0, t2 - t1 + print(t1 - t0, t2 - t1) class test_local_remove_switch_const_cond(unittest.TestCase): @@ -3769,7 +3770,7 @@ config.warn.sum_div_dimshuffle_bug = False try: for i, s in enumerate(sums): - print i + print(i) f = theano.function([a, b, c, d], s, mode=self.mode, on_unused_input='ignore') theano.printing.debugprint(f) @@ -4039,7 +4040,7 @@ out = num_v / denom_m theano.printing.debugprint(out, print_type=True) - print out.broadcastable + print(out.broadcastable) assert numpy.all(out.broadcastable == (True, False)) f = theano.function([num_len_s, denom_s], out) @@ -4133,7 +4134,7 @@ admat = matrix() admat_val = numpy.random.rand(4, 3).astype(config.floatX) - for i in xrange(2): + for i in range(2): f = function([admat], Shape_i(i)(admat)) out = f(admat_val) assert numpy.allclose(out, admat_val.shape[i]) --- ./theano/tensor/tests/test_naacl09.py (original) +++ ./theano/tensor/tests/test_naacl09.py (refactored) @@ -88,7 +88,7 @@ if _qfilters is None: #self.qfilters = [theano.Member(T.dmatrix('q%i'%i)) for i in xrange(n_quadratic_filters)] self.qfilters = [(T.dmatrix('q%i' % i)) - for i in xrange(n_quadratic_filters)] + for i in range(n_quadratic_filters)] else: #self.qfilters = [theano.Member(q) for q in _qfilters] self.qfilters = [(q) for q in _qfilters] @@ -434,8 +434,8 @@ hidden_pretraining_cost, hidden_pretraining_params) pretraining_updates = \ dict((p, p - self.lr * g) for p, g in \ RefactoringTool: No changes to ./theano/tensor/tests/test_mpi.py RefactoringTool: Refactored ./theano/tensor/tests/test_misc.py RefactoringTool: Refactored ./theano/tensor/tests/test_merge.py RefactoringTool: No changes to ./theano/tensor/tests/test_keepdims.py RefactoringTool: No changes to ./theano/tensor/tests/test_io.py RefactoringTool: Refactored ./theano/tensor/tests/test_inc_subtensor.py - zip(input_pretraining_params, input_pretraining_gradients) \ - + zip(hidden_pretraining_params, hidden_pretraining_gradients)) + list(zip(input_pretraining_params, input_pretraining_gradients)) \ + + list(zip(hidden_pretraining_params, hidden_pretraining_gradients))) self.pretraining_update = module.Method(self.inputs, [input_pretraining_cost, hidden_pretraining_cost], @@ -583,8 +583,8 @@ for i, node in enumerate(m.pretraining_update.maker.fgraph.toposort()): idx_of_node[node] = i if False and i > -1: - print ' ', i, node, [(ii, idx_of_node.get(ii. - owner, 'IN')) for ii in node.inputs] + print(' ', i, node, [(ii, idx_of_node.get(ii.
+ owner, 'IN')) for ii in node.inputs]) prog_str.append(str(node)) #print input_pretraining_gradients[4].owner.inputs #print input_pretraining_gradients[4].owner.inputs[1].owner.inputs @@ -594,14 +594,14 @@ rng = N.random.RandomState(unittest_tools.fetch_seed(23904)) - inputs = [rng.rand(10, m.input_size) for i in 1, 2, 3] + inputs = [rng.rand(10, m.input_size) for i in (1, 2, 3)] targets = N.asarray([0, 3, 4, 2, 3, 4, 4, 2, 1, 0]) #print inputs #print 'UNSUPERVISED PHASE' t = time.time() - for i in xrange(3): - for j in xrange(iters_per_unsup): + for i in range(3): + for j in range(iters_per_unsup): try: known_fail = False m.pretraining_update(*inputs) @@ -623,8 +623,8 @@ #print 'FINETUNING GRAPH' #print 'SUPERVISED PHASE COSTS (%s)'%optimizer t = time.time() - for i in xrange(3): - for j in xrange(iters_per_unsup): + for i in range(3): + for j in range(iters_per_unsup): m.finetuning_update(*(inputs + [targets])) s0 = str(m.finetuning_update(*(inputs + [targets]))) #print iters_per_sup * (i+1), s0 --- ./theano/tensor/tests/test_misc.py (original) +++ ./theano/tensor/tests/test_misc.py (refactored) @@ -12,7 +12,7 @@ tensor.int_div( tensor.DimShuffle(y[0].broadcastable, ['x'])(y[0]), 2)) sys.stdout.flush() - print f(numpy.ones(1, dtype='int64') * 3) + print(f(numpy.ones(1, dtype='int64') * 3)) # XXX: there is no assert, nor comment that DEBUGMODE is to do the # checking. What was the bug, and how is it being tested? @@ -29,10 +29,10 @@ y = numpy.ones((2, 5)) z = g(x, y) - print z # Should be zero. + print(z) # Should be zero. x.fill(1) - print g(x, y) # Should be non-zero. - print z # Should still be zero. + print(g(x, y)) # Should be non-zero. + print(z) # Should still be zero. assert numpy.linalg.norm(z) == 0 # The code above was supposed to fail when it was written (or, more @@ -62,10 +62,10 @@ z = g(a, b, c) z_backup = copy.copy(z) id_z = id(z) - print('Output z after first call: %s' % (z, )) + print(('Output z after first call: %s' % (z, ))) a[0, 0] = 1 id_other = id(g(a, b, c)) - print ('Output z after second call: %s' % (z, )) + print(('Output z after second call: %s' % (z, ))) # Ensure that calling the function again returns a pointer towards a new # array. 
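# Illustrative sketch (not from the Theano sources): in the test_misc.py hunk
# above, a print statement whose argument was already parenthesised picks up a
# second pair of parentheses from the mechanical rewrite. Both forms print the
# same thing; the extra pair is harmless noise that is usually removed by hand.
z = [0.0, 0.0]
print(('Output z after first call: %s' % (z,)))   # what the converter emits
print('Output z after first call: %s' % (z,))     # hand-cleaned equivalent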
assert id_z != id_other --- ./theano/tensor/tests/test_merge.py (original) +++ ./theano/tensor/tests/test_merge.py (refactored) @@ -29,7 +29,7 @@ self.x = x def make_node(self, *inputs): - inputs = map(as_variable, inputs) + inputs = list(map(as_variable, inputs)) for input in inputs: if not isinstance(input.type, MyType): raise Exception("Error 1") --- ./theano/tensor/tests/test_inc_subtensor.py (original) +++ ./theano/tensor/tests/test_inc_subtensor.py (refactored) @@ -101,7 +101,7 @@ sl3 = 2 for do_set in [True, False]: - print "Set", do_set + print("Set", do_set) if do_set: RefactoringTool: Refactored ./theano/tensor/tests/test_gc.py RefactoringTool: No changes to ./theano/tensor/tests/test_fourier.py RefactoringTool: Refactored ./theano/tensor/tests/test_extra_ops.py RefactoringTool: Refactored ./theano/tensor/tests/test_elemwise.py resut = tt.set_subtensor(a[sl1, sl3, sl2], increment) --- ./theano/tensor/tests/test_gc.py (original) +++ ./theano/tensor/tests/test_gc.py (refactored) @@ -1,4 +1,4 @@ -import cPickle +import pickle import sys import numpy import theano @@ -24,7 +24,7 @@ x = T.dvector() #print >> sys.stderr, 'BUILDING GRAPH' - for i in xrange(2): #TODO: 30 causes like LONG compilation due to MERGE + for i in range(2): #TODO: 30 causes like LONG compilation due to MERGE if i : r = r + r/10 else: @@ -45,8 +45,8 @@ f = theano.function([x], r,mode=theano.Mode(optimizer=optimizer, linker=f_linker)) g = theano.function([x], r,mode=theano.Mode(optimizer=optimizer, linker=g_linker)) - len_pre_f = len(cPickle.dumps(f)) - len_pre_g = len(cPickle.dumps(g)) + len_pre_f = len(pickle.dumps(f)) + len_pre_g = len(pickle.dumps(g)) # should be no difference at first # In future, FunctionMaker might pickle linker-dependent stuff and make @@ -54,19 +54,19 @@ assert len_pre_f == len_pre_g def a(fn): - return len(cPickle.dumps(fn.maker)) + return len(pickle.dumps(fn.maker)) assert a(f) == a(f) # some sanity checks on the pickling mechanism assert a(g) == a(g) # some sanity checks on the pickling mechanism def b(fn): return len( - cPickle.dumps( + pickle.dumps( theano.compile.function_module._pickle_Function( fn))) assert b(f) == b(f) # some sanity checks on the pickling mechanism def c(fn): - return len(cPickle.dumps(fn)) + return len(pickle.dumps(fn)) assert c(f) == c(f) # some sanity checks on the pickling mechanism assert c(g) == c(g) # some sanity checks on the pickling mechanism @@ -77,8 +77,8 @@ g(numpy.ones(100, dtype='float64')) # serialize the functions again - post_f = cPickle.dumps(f) - post_g = cPickle.dumps(g) + post_f = pickle.dumps(f) + post_g = pickle.dumps(g) len_post_f = len(post_f) len_post_g = len(post_g) @@ -98,7 +98,7 @@ Ironically, there is actually no merging to do in this graph. 
""" x = T.dvector() - for i in xrange(50): + for i in range(50): if i : r = r + r/10 else: --- ./theano/tensor/tests/test_extra_ops.py (original) +++ ./theano/tensor/tests/test_extra_ops.py (refactored) @@ -195,7 +195,7 @@ class TestRepeatOp(utt.InferShapeTester): def _possible_axis(self, ndim): - return [None] + range(ndim) + [-i for i in range(ndim)] + return [None] + list(range(ndim)) + [-i for i in range(ndim)] def setUp(self): super(TestRepeatOp, self).setUp() --- ./theano/tensor/tests/test_elemwise.py (original) +++ ./theano/tensor/tests/test_elemwise.py (refactored) @@ -1,6 +1,6 @@ -import cPickle +import pickle from copy import copy -from itertools import imap + import unittest import numpy @@ -311,7 +311,7 @@ e = as_tensor_variable(tensor_op(x, axis=tosum)) if tosum is None: - tosum = range(len(xsh)) + tosum = list(range(len(xsh))) f = copy(linker).accept(FunctionGraph([x], [e])).make_function() xv = numpy.asarray(numpy.random.rand(*xsh)) @@ -417,7 +417,7 @@ else: e = tensor_op(x, axis=tosum) if tosum is None: - tosum = range(len(xsh)) + tosum = list(range(len(xsh))) f = copy(linker).accept(FunctionGraph([x], [e.shape])).make_function() if not(scalar_op in [scalar.maximum, scalar.minimum] and @@ -506,7 +506,7 @@ dtype = theano.config.floatX x = TensorType(dtype, [(entry == 1) for entry in xsh])('x') if tosum is None: - tosum = range(len(xsh)) + tosum = list(range(len(xsh))) xv = numpy.asarray(numpy.random.rand(*xsh), dtype=dtype) self._compile_and_check([x], [self.op(scalar.add, axis=tosum)(x)], @@ -630,9 +630,9 @@ def test_pickle_bug(self): # Regression test for bug fixed in 24d4fd291054. o = Prod() - s = cPickle.dumps(o, protocol=-1) - o = cPickle.loads(s) - cPickle.dumps(o) + s = pickle.dumps(o, protocol=-1) + o = pickle.loads(s) + pickle.dumps(o) class test_IsInf_IsNan(unittest.TestCase): @@ -681,7 +681,7 @@ """ # We try multiple axis combinations even though axis should not matter. axes = [None, 0, 1, [], [0], [1], [0, 1]] - for idx, dtype in enumerate(imap(str, theano.scalar.all_types)): + for idx, dtype in enumerate(map(str, theano.scalar.all_types)): axis = axes[idx % len(axes)] x = tensor.matrix(dtype=dtype) s = x.sum(axis=axis) @@ -702,7 +702,7 @@ ##Test the default acc_dtype of a sum(). # We try multiple axis combinations even though axis should not matter. axes = [None, 0, 1, [], [0], [1], [0, 1]] - for idx, dtype in enumerate(imap(str, theano.scalar.all_types)): + for idx, dtype in enumerate(map(str, theano.scalar.all_types)): axis = axes[idx % len(axes)] x = tensor.matrix(dtype=dtype) s = x.sum(axis=axis) @@ -728,9 +728,9 @@ # We try multiple axis combinations even though axis should not matter. axes = [None, 0, 1, [], [0], [1], [0, 1]] idx = 0 - for input_dtype in imap(str, theano.scalar.all_types): + for input_dtype in map(str, theano.scalar.all_types): x = tensor.matrix(dtype=input_dtype) - for output_dtype in imap(str, theano.scalar.all_types): + for output_dtype in map(str, theano.scalar.all_types): # If the output is a complex, the gradient of the sum will # cast the complex to the input dtype. We can't call the normal # cast on a complex to a not complex as this is ambiguous. @@ -760,9 +760,9 @@ # We try multiple axis combinations even though axis should not matter. 
axes = [None, 0, 1, [], [0], [1], [0, 1]] idx = 0 - for input_dtype in imap(str, theano.scalar.all_types): + for input_dtype in map(str, theano.scalar.all_types): x = tensor.matrix(dtype=input_dtype) - for acc_dtype in imap(str, theano.scalar.all_types): + for acc_dtype in map(str, theano.scalar.all_types): # If the accumulator is a complex, the gradient of the sum will # cast the complex to the input dtype. We can't call the normal # cast on a complex to a not complex as this is ambiguous. @@ -808,7 +808,7 @@ """ # We try multiple axis combinations even though axis should not matter. axes = [None, 0, 1, [], [0], [1], [0, 1]] - for idx, dtype in enumerate(imap(str, theano.scalar.all_types)): + for idx, dtype in enumerate(map(str, theano.scalar.all_types)): axis = axes[idx % len(axes)] x = tensor.matrix(dtype=dtype) m = x.mean(axis=axis) @@ -828,9 +828,9 @@ # We try multiple axis combinations even though axis should not matter. axes = [None, 0, 1, [], [0], [1], [0, 1]] idx = 0 - for input_dtype in imap(str, theano.scalar.all_types): + for input_dtype in map(str, theano.scalar.all_types): x = tensor.matrix(dtype=input_dtype) - for sum_dtype in imap(str, theano.scalar.all_types): + for sum_dtype in map(str, theano.scalar.all_types): axis = axes[idx % len(axes)] # If the inner sum cannot be created, it will raise a # TypeError. @@ -886,7 +886,7 @@ """ # We try multiple axis combinations even though axis should not matter. axes = [None, 0, 1, [], [0], [1], [0, 1]] - for idx, dtype in enumerate(imap(str, theano.scalar.all_types)): + for idx, dtype in enumerate(map(str, theano.scalar.all_types)): axis = axes[idx % len(axes)] x = tensor.matrix(dtype=dtype) p = x.prod(axis=axis) @@ -909,7 +909,7 @@ """ # We try multiple axis combinations even though axis should not matter. axes = [None, 0, 1, [], [0], [1], [0, 1]] - for idx, dtype in enumerate(imap(str, theano.scalar.all_types)): + for idx, dtype in enumerate(map(str, theano.scalar.all_types)): axis = axes[idx % len(axes)] x = tensor.matrix(dtype=dtype) p = x.prod(axis=axis) @@ -935,9 +935,9 @@ # We try multiple axis combinations even though axis should not matter. axes = [None, 0, 1, [], [0], [1], [0, 1]] idx = 0 - for input_dtype in imap(str, theano.scalar.all_types): + for input_dtype in map(str, theano.scalar.all_types): x = tensor.matrix(dtype=input_dtype) - for output_dtype in imap(str, theano.scalar.all_types): + for output_dtype in map(str, theano.scalar.all_types): axis = axes[idx % len(axes)] idx += 1 prod_var = x.prod(dtype=output_dtype, axis=axis) @@ -966,9 +966,9 @@ # We try multiple axis combinations even though axis should not matter. axes = [None, 0, 1, [], [0], [1], [0, 1]] idx = 0 - for input_dtype in imap(str, theano.scalar.all_types): + for input_dtype in map(str, theano.scalar.all_types): x = tensor.matrix(dtype=input_dtype) - for acc_dtype in imap(str, theano.scalar.all_types): + for acc_dtype in map(str, theano.scalar.all_types): axis = axes[idx % len(axes)] # If acc_dtype would force a downcast, we expect a TypeError # We always allow int/uint inputs with float/complex outputs. @@ -1007,7 +1007,7 @@ """ # We try multiple axis combinations even though axis should not matter. 
axes = [None, 0, 1, [], [0], [1], [0, 1]] - for idx, dtype in enumerate(imap(str, theano.scalar.all_types)): + for idx, dtype in enumerate(map(str, theano.scalar.all_types)): axis = axes[idx % len(axes)] x = ProdWithoutZeros(axis=axis)(tensor.matrix(dtype=dtype)) assert x.dtype == dict( @@ -1025,7 +1025,7 @@ """ # We try multiple axis combinations even though axis should not matter. axes = [None, 0, 1, [], [0], [1], [0, 1]] - for idx, dtype in enumerate(imap(str, theano.scalar.all_types)): + for idx, dtype in enumerate(map(str, theano.scalar.all_types)): axis = axes[idx % len(axes)] x = tensor.matrix(dtype=dtype) p = ProdWithoutZeros(axis=axis)(x) @@ -1054,9 +1054,9 @@ # We try multiple axis combinations even though axis should not matter. axes = [None, 0, 1, [], [0], [1], [0, 1]] idx = 0 - for input_dtype in imap(str, theano.scalar.all_types): + for input_dtype in map(str, theano.scalar.all_types): x = tensor.matrix(dtype=input_dtype) - for output_dtype in imap(str, theano.scalar.all_types): + for output_dtype in map(str, theano.scalar.all_types): axis = axes[idx % len(axes)] prod_woz_var = ProdWithoutZeros( axis=axis, dtype=output_dtype)(x) @@ -1077,9 +1077,9 @@ # We try multiple axis combinations even though axis should not matter. axes = [None, 0, 1, [], [0], [1], [0, 1]] idx = 0 - for input_dtype in imap(str, theano.scalar.all_types): + for input_dtype in map(str, theano.scalar.all_types): x = tensor.matrix(dtype=input_dtype) - for acc_dtype in imap(str, theano.scalar.all_types): + for acc_dtype in map(str, theano.scalar.all_types): RefactoringTool: Refactored ./theano/tensor/tests/test_complex.py RefactoringTool: No changes to ./theano/tensor/tests/test_casting.py RefactoringTool: Refactored ./theano/tensor/tests/test_blas_scipy.py RefactoringTool: Refactored ./theano/tensor/tests/test_blas_c.py axis = axes[idx % len(axes)] # If acc_dtype would force a downcast, we expect a TypeError # We always allow int/uint inputs with float/complex outputs. 
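# Illustrative sketch (not from the Theano sources): the next hunk, like the
# c_code hunk in test_opt.py above, shows how 2to3 handles tuple parameters,
# which Python 3 removed from def signatures. The fixer invents placeholder
# names (xxx_todo_changeme, xxx_todo_changeme1) and unpacks them in the body;
# a hand-written port would pick meaningful names instead. The mechanical
# assert_ -> assertTrue rename visible in the test_opt.py hunks is a similar
# case that likely needs manual review, since theano.tensor.opt.assert_ is a
# Theano Op rather than a deprecated unittest method alias.
class Rect(object):
    # Python 2 allowed: def area(self, (w, h)): return w * h
    def area(self, size):
        w, h = size        # explicit unpacking replaces the tuple parameter
        return w * h
print(Rect().area((3, 4)))   # -> 12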
@@ -1212,7 +1212,9 @@ def impl(self, n, x): return x * n - def grad(self, (n, x), (gz,)): + def grad(self, xxx_todo_changeme, xxx_todo_changeme1): + (n, x) = xxx_todo_changeme + (gz,) = xxx_todo_changeme1 dy_dx = n return [theano.gradient.grad_not_implemented(self, 0, n), gz * dy_dx] --- ./theano/tensor/tests/test_complex.py (original) +++ ./theano/tensor/tests/test_complex.py (refactored) @@ -10,7 +10,7 @@ def test0(self): x= zvector() rng = numpy.random.RandomState(23) - xval = numpy.asarray(list(numpy.complex(rng.randn(), rng.randn()) for i in xrange(10))) + xval = numpy.asarray(list(numpy.complex(rng.randn(), rng.randn()) for i in range(10))) assert numpy.all( xval.real == theano.function([x], real(x))(xval)) assert numpy.all( xval.imag == theano.function([x], imag(x))(xval)) @@ -67,9 +67,9 @@ aval = numpy.asarray(rng.randn(2,5)) try: utt.verify_grad(f, [aval]) - except utt.verify_grad.E_grad, e: - print e.num_grad.gf - print e.analytic_grad + except utt.verify_grad.E_grad as e: + print(e.num_grad.gf) + print(e.analytic_grad) raise @dec.knownfailureif(True,"Complex grads not enabled, see #178") @@ -83,9 +83,9 @@ aval = numpy.asarray(rng.randn(2,5)) try: utt.verify_grad(f, [aval]) - except utt.verify_grad.E_grad, e: - print e.num_grad.gf - print e.analytic_grad + except utt.verify_grad.E_grad as e: + print(e.num_grad.gf) + print(e.analytic_grad) raise @dec.knownfailureif(True,"Complex grads not enabled, see #178") def test_mul_mixed(self): @@ -99,9 +99,9 @@ bval = rng.randn(5) try: utt.verify_grad(f, [aval, bval]) - except utt.verify_grad.E_grad, e: - print e.num_grad.gf - print e.analytic_grad + except utt.verify_grad.E_grad as e: + print(e.num_grad.gf) + print(e.analytic_grad) raise @dec.knownfailureif(True,"Complex grads not enabled, see #178") --- ./theano/tensor/tests/test_blas_scipy.py (original) +++ ./theano/tensor/tests/test_blas_scipy.py (refactored) @@ -4,7 +4,7 @@ import theano.tensor as tensor from theano.tensor.blas_scipy import ScipyGer -from test_blas import TestCase, gemm_no_inplace, TestBlasStrides +from .test_blas import TestCase, gemm_no_inplace, TestBlasStrides from theano.tests.unittest_tools import TestOptimizationMixin class TestScipyGer(TestCase, TestOptimizationMixin): --- ./theano/tensor/tests/test_blas_c.py (original) +++ ./theano/tensor/tests/test_blas_c.py (refactored) @@ -55,23 +55,23 @@ return tensor.as_tensor_variable(numpy.asarray(bval, dtype=self.dtype)) def test_eq(self): - self.assert_(CGer(True) == CGer(True)) - self.assert_(CGer(False) == CGer(False)) - self.assert_(CGer(False) != CGer(True)) - - self.assert_(CGer(True) != ScipyGer(True)) - self.assert_(CGer(False) != ScipyGer(False)) - self.assert_(CGer(True) != Ger(True)) - self.assert_(CGer(False) != Ger(False)) + self.assertTrue(CGer(True) == CGer(True)) + self.assertTrue(CGer(False) == CGer(False)) + self.assertTrue(CGer(False) != CGer(True)) + + self.assertTrue(CGer(True) != ScipyGer(True)) + self.assertTrue(CGer(False) != ScipyGer(False)) + self.assertTrue(CGer(True) != Ger(True)) + self.assertTrue(CGer(False) != Ger(False)) # assert that eq works for non-CGer instances - self.assert_(CGer(False) is not None) - self.assert_(CGer(True) is not None) + self.assertTrue(CGer(False) is not None) + RefactoringTool: Refactored ./theano/tensor/tests/test_blas.py self.assertTrue(CGer(True) is not None) def test_hash(self): - self.assert_(hash(CGer(True)) == hash(CGer(True))) - self.assert_(hash(CGer(False)) == hash(CGer(False))) - self.assert_(hash(CGer(False)) != hash(CGer(True))) + 
self.assertTrue(hash(CGer(True)) == hash(CGer(True))) + self.assertTrue(hash(CGer(False)) == hash(CGer(False))) + self.assertTrue(hash(CGer(False)) != hash(CGer(True))) def test_optimization_pipeline(self): f = self.function([self.x, self.y], tensor.outer(self.x, self.y)) --- ./theano/tensor/tests/test_blas.py (original) +++ ./theano/tensor/tests/test_blas.py (refactored) @@ -19,7 +19,7 @@ gemm_inplace, gemm_no_inplace, InconsistencyError, Ger, ger, ger_destructive) from theano.tests import unittest_tools -from test_basic import (_approx_eq, as_tensor_variable, inplace_func, +from .test_basic import (_approx_eq, as_tensor_variable, inplace_func, compile, inplace) #, constant, eval_outputs) import theano.tensor.blas_scipy @@ -72,10 +72,10 @@ b = numpy.asarray(b_, dtype=dtype) def cmp_linker(z, a, x, y, b, l): - z, a, x, y, b = [numpy.asarray(p) for p in z, a, x, y, b] + z, a, x, y, b = [numpy.asarray(p) for p in (z, a, x, y, b)] z_orig = z.copy() tz, ta, tx, ty, tb = [as_tensor_variable(p).type() - for p in z, a, x, y, b] + for p in (z, a, x, y, b)] f = inplace_func([tz, ta, tx, ty, tb], gemm_inplace(tz, ta, tx, ty, tb), @@ -105,7 +105,7 @@ Gemm.debug = True try: g = gemm_inplace([1.], 1., [1.], [1.], 1.) - except TypeError, e: + except TypeError as e: if exc_message(e) is Gemm.E_rank: return self.fail() @@ -113,7 +113,7 @@ def test0(self): try: self.cmp(1., 0., 1.0, 1.0, 1.0) - except TypeError, e: + except TypeError as e: if exc_message(e) is Gemm.E_rank: return self.fail() @@ -121,7 +121,7 @@ def test2(self): try: self.cmp(2., 1.0, [3, 2, 1.], [[1], [2], [3.]], 1.0) - except TypeError, e: + except TypeError as e: self.assertTrue(exc_message(e) == Gemm.E_rank) return self.fail() @@ -213,7 +213,7 @@ Z = as_tensor_variable(self.rand(2, 2)) try: gemm_inplace(Z, 1.0, Z, Z, 1.0) - except InconsistencyError, e: + except InconsistencyError as e: if exc_message(e) == Gemm.E_z_uniq: return self.fail() @@ -224,7 +224,7 @@ A = as_tensor_variable(self.rand(2, 2)) try: gemm_inplace(Z, 1.0, A, inplace.transpose_inplace(Z), 1.0) - except InconsistencyError, e: + except InconsistencyError as e: if exc_message(e) == Gemm.E_z_uniq: return self.fail() @@ -235,7 +235,7 @@ A = as_tensor_variable(self.rand(2, 2)) try: gemm_inplace(Z, 1.0, inplace.transpose_inplace(Z), A, 1.0) - except InconsistencyError, e: + except InconsistencyError as e: if exc_message(e) == Gemm.E_z_uniq: return self.fail() @@ -246,7 +246,7 @@ A = as_tensor_variable(self.rand(2, 2)) try: gemm_inplace(Z, 1.0, Z, A, 1.0) - except InconsistencyError, e: + except InconsistencyError as e: if exc_message(e) == Gemm.E_z_uniq: return self.fail() @@ -269,11 +269,11 @@ def t(z, x, y, a=1.0, b=0.0, l='c|py', dt='float64'): z, a, x, y, b = [theano._asarray(p, dtype=dt) - for p in z, a, x, y, b] + for p in (z, a, x, y, b)] z_orig = z.copy() z_after = self._gemm(z, a, x, y, b) - tz, ta, tx, ty, tb = [shared(p) for p in z, a, x, y, b] + tz, ta, tx, ty, tb = [shared(p) for p in (z, a, x, y, b)] #f = inplace_func([tz,ta,tx,ty,tb], gemm_inplace(tz,ta,tx,ty,tb), # mode = compile.Mode(optimizer = None, linker=l)) @@ -318,7 +318,7 @@ try: t(C.T, A[:2, :], B[:, :2].T) - except ValueError, e: + except ValueError as e: if exc_message(e).find('aligned') >= 0: return self.fail() @@ -332,20 +332,20 @@ def t(z, x, y, a=1.0, b=0.0, l='c|py', dt='float64'): z, a, x, y, b = [theano._asarray(p, dtype=dt) - for p in z, a, x, y, b] + for p in (z, a, x, y, b)] z_orig = z.copy() z_after = numpy.zeros_like(z_orig) - for i in xrange(3): + for i in range(3): z_after[:, :, i] = 
self._gemm(z[:, :, i], a, x[:, :, i], y[:, :, i], b) - tz, ta, tx, ty, tb = [shared(p) for p in z, a, x, y, b] - for i in xrange(3): + tz, ta, tx, ty, tb = [shared(p) for p in (z, a, x, y, b)] + for i in range(3): f_i = inplace_func([], gemm_inplace(tz[:, :, i], ta, tx[:, :, i], ty[:, :, i], tb), mode=compile.Mode(optimizer=None, linker=l)) - for j in xrange(3): + for j in range(3): # tz will not _always_ be overwritten, # and adding update={...} in the call to function() # will create cycles, so we update by hand. @@ -361,7 +361,7 @@ g_i = theano.function([], tz_i, updates=[(tz, T.set_subtensor(tz[:, :, i], tz_i))], mode=compile.Mode(optimizer=None, linker=l)) - for j in xrange(3): + for j in range(3): g_i() unittest_tools.assert_allclose(z_after[:, :, i], tz.get_value(borrow=True)[:, :, i]) @@ -438,7 +438,7 @@ def fail(msg): - print 'FAIL', msg + print('FAIL', msg) assert False @@ -500,7 +500,7 @@ max_abs_err) except Failure: for node in f.maker.fgraph.toposort(): - print 'GRAPH', node + print('GRAPH', node) raise @@ -575,7 +575,7 @@ max_abs_err) except Failure: for node in f.maker.fgraph.toposort(): - print 'GRAPH', node + print('GRAPH', node) raise @@ -785,7 +785,7 @@ def update_H(cur_V): return T.nnet.sigmoid(T.dot(cur_V, W) + T.dot(G, W.T)) - for i in xrange(num_rounds): + for i in range(num_rounds): cur_V = update_V(cur_H) cur_H = update_H(cur_V) @@ -812,7 +812,7 @@ f = inplace_func([Z, b, R, S], [Z * (Z + b * T.dot(R, S).T)], mode='FAST_RUN') if (gemm_inplace in [n.op for n in f.maker.fgraph.apply_nodes]): - print pp(f.maker.fgraph.outputs[0]) + print(pp(f.maker.fgraph.outputs[0])) raise Failure('gemm_inplace in graph') assert gemm_no_inplace in [n.op for n in f.maker.fgraph.apply_nodes] @@ -1276,10 +1276,10 @@ b_is_vector = False assert a.shape[1] == b.shape[0] c = zeros((a.shape[0], b.shape[1]), common_type(a, b)) - for i in xrange(a.shape[0]): - for j in xrange(b.shape[1]): + for i in range(a.shape[0]): + for j in range(b.shape[1]): s = 0 - for k in xrange(a.shape[1]): + for k in range(a.shape[1]): s += a[i, k] * b[k, j] c[i, j] = s if b_is_vector: @@ -1549,13 +1549,13 @@ self.za = T.zscalar() def test_works_on_all_valid_dtypes(self): - RefactoringTool: Refactored ./theano/tensor/tests/test_basic.py self.assertEquals(self.fm.type, + self.assertEqual(self.fm.type, ger(self.fm, self.fa, self.fv, self.fv_2).type) - self.assertEquals(self.fm.type, + self.assertEqual(self.fm.type, ger(self.fm, self.fa, self.fv, self.fv_2).type) - self.assertEquals(self.fm.type, + self.assertEqual(self.fm.type, ger(self.fm, self.fa, self.fv, self.fv_2).type) - self.assertEquals(self.fm.type, + self.assertEqual(self.fm.type, ger(self.fm, self.fa, self.fv, self.fv_2).type) def test_fails_on_invalid_dtypes(self): @@ -1571,7 +1571,7 @@ self.assertRaises(TypeError, ger, self.fm, self.fv1, self.fv, self.fv_2) # actually doing the aforementioned dimshuffle makes it work - self.assertEquals(self.fm.type, + self.assertEqual(self.fm.type, ger(self.fm, self.fv1.dimshuffle(), self.fv, self.fv_2).type) def test_fails_for_nonmatrix_A(self): --- ./theano/tensor/tests/test_basic.py (original) +++ ./theano/tensor/tests/test_basic.py (refactored) @@ -7,10 +7,11 @@ import unittest import warnings from copy import copy, deepcopy -from itertools import izip +from functools import reduce + # Import builtin min to be able to use it after importing the tensor version. 
-import __builtin__ -builtin_min = __builtin__.min +import builtins +builtin_min = builtins.min from nose.plugins.skip import SkipTest import numpy @@ -284,7 +285,7 @@ good = self.add_memmap_values(self.good) - for testname, inputs in good.items(): + for testname, inputs in list(good.items()): inputs = [copy(input) for input in inputs] inputrs = [TensorType( dtype=input.dtype, @@ -293,7 +294,7 @@ )() for input in inputs] try: node = safe_make_node(self.op, *inputrs) - except Exception, exc: + except Exception as exc: err_msg = ("Test %s::%s: Error occurred while" " making a node with inputs %s") % ( self.op, testname, inputs) @@ -302,7 +303,7 @@ try: f = inplace_func(inputrs, node.outputs, mode=mode, name='test_good') - except Exception, exc: + except Exception as exc: err_msg = ("Test %s::%s: Error occurred while" " trying to make a Function") % (self.op, testname) exc.args += (err_msg,) @@ -324,7 +325,7 @@ try: variables = f(*inputs) - except Exception, exc: + except Exception as exc: err_msg = ("Test %s::%s: Error occurred while calling" " the Function on the inputs %s") % ( self.op, testname, inputs) @@ -335,7 +336,7 @@ expecteds = (expecteds, ) for i, (variable, expected) in enumerate( - izip(variables, expecteds)): + zip(variables, expecteds)): if (variable.dtype != expected.dtype or variable.shape != expected.shape or not numpy.allclose(variable, expected, @@ -357,7 +358,7 @@ atol=eps, rtol=eps), numpy.allclose(variable, expected))) - for description, check in self.checks.items(): + for description, check in list(self.checks.items()): if not check(inputs, variables): self.fail(("Test %s::%s: Failed check: %s (inputs" " were %s, outputs were %s)") % ( @@ -367,7 +368,7 @@ def test_bad_build(self): if skip: raise SkipTest(skip) - for testname, inputs in self.bad_build.items(): + for testname, inputs in list(self.bad_build.items()): inputs = [copy(input) for input in inputs] inputrs = [shared(input) for input in inputs] self.assertRaises(Exception, @@ -379,11 +380,11 @@ def test_bad_runtime(self): if skip: raise SkipTest(skip) - for testname, inputs in self.bad_runtime.items(): + for testname, inputs in list(self.bad_runtime.items()): inputrs = [shared(input) for input in inputs] try: node = safe_make_node(self.op, *inputrs) - except Exception, exc: + except Exception as exc: err_msg = ("Test %s::%s: Error occurred while trying" " to make a node with inputs %s") % ( self.op, testname, inputs) @@ -392,7 +393,7 @@ try: f = inplace_func([], node.outputs, mode=mode, name="test_bad_runtime") - except Exception, exc: + except Exception as exc: err_msg = ("Test %s::%s: Error occurred while trying" " to make a Function") % (self.op, testname) exc.args += (err_msg,) @@ -411,13 +412,13 @@ backup = config.warn.sum_div_dimshuffle_bug config.warn.sum_div_dimshuffle_bug = False try: - for testname, inputs in self.grad.items(): + for testname, inputs in list(self.grad.items()): inputs = [copy(input) for input in inputs] try: utt.verify_grad(self.op, inputs, mode=self.mode, rel_tol=_grad_rtol) - except Exception, exc: + except Exception as exc: err_msg = ("Test %s::%s: Error occurred while" " computing the gradient on the following" " inputs: %s") % (self.op, testname, inputs) @@ -438,7 +439,7 @@ # This is not actually an Op return - for testname, inputs in self.good.items(): + for testname, inputs in list(self.good.items()): inputs = [copy(input) for input in inputs] inputrs = [TensorType( dtype=input.dtype, @@ -771,7 +772,7 @@ for a in without: if a in rval: del rval[a] - for kw, val in 
kwargs.items(): + for kw, val in list(kwargs.items()): rval[kw] = val return rval @@ -2285,7 +2286,7 @@ b = numpy.asarray(b) if a.shape != b.shape: if _approx_eq.debug: - print a.shape, b.shape + print(a.shape, b.shape) return False abs_rel_err = numeric_grad.abs_rel_err(a, b) # numpy.max don't like empty ndarray. @@ -2293,7 +2294,7 @@ return True if numpy.max(abs_rel_err) >= eps: if _approx_eq.debug: - print a, b + print(a, b) return False return True _approx_eq.debug = 0 @@ -2366,8 +2367,8 @@ n = len(test_constants) # We verify that signatures of two rows i, j in the matrix above are # equal if and only if i == j. - for i in xrange(n): - for j in xrange(n): + for i in range(n): + for j in range(n): x = constant(test_constants[i]) y = constant(test_constants[j]) assert (x.signature() == y.signature()) == (i == j) @@ -2448,7 +2449,7 @@ try: eval_outputs(max_and_argmax(n, 3)) assert False - except ValueError, e: + except ValueError as e: pass finally: _logger.setLevel(oldlevel) @@ -2461,7 +2462,7 @@ try: eval_outputs(max_and_argmax(n, -3)) assert False - except ValueError, e: + except ValueError as e: pass finally: sys.stderr = old_stderr @@ -2529,7 +2530,7 @@ # Compute pairwise absolute differences. diff = numpy.abs(data_vector.reshape((-1, 1)) - data_vector) # Alter the diagonal to avoid a zero minimum. - for i in xrange(len(diff)): + for i in range(len(diff)): diff[i, i] = 1 # Find an appropriate epsilon. eps = builtin_min(numeric_grad.type_eps[config.floatX], @@ -2557,7 +2558,7 @@ assert numpy.all(max_grad_data == z) for axis in (-1, 0, 1, None): - for j in xrange(2): + for j in range(2): safe_verify_grad(lambda v: max_and_argmax(v, axis=axis)[j], [data]) if axis != 1: @@ -2641,7 +2642,7 @@ try: eval_outputs(fct(n, 3)) assert False - except ValueError, e: + except ValueError as e: pass finally: _logger.setLevel(oldlevel) @@ -2655,7 +2656,7 @@ try: eval_outputs(fct(n, -3)) assert False - except ValueError, e: + except ValueError as e: pass finally: sys.stderr = old_stderr @@ -2774,7 +2775,7 @@ try: eval_outputs(fct(n, 3)) assert False - except ValueError, e: + except ValueError as e: pass finally: _logger.setLevel(oldlevel) @@ -2788,7 +2789,7 @@ try: eval_outputs(fct(n, -3)) assert False - except ValueError, e: + except ValueError as e: pass finally: sys.stderr = old_stderr @@ -3779,7 +3780,7 @@ x = numpy.asarray(x, dtype=config.floatX) w = numpy.asarray(w, dtype=config.floatX) - for i in xrange(100): + for i in range(100): ssd, gw = fn(x, w) #print ssd, x*w, x, w if i == 0: @@ -3802,7 +3803,7 @@ myssd0 = numpy.sum((x * w - ones) ** 2.0) # we want at least a test that is not too fast. So we make one here. - for i in xrange(100): + for i in range(100): gw = 2 * (x * w - ones) * x # derivative of dMSE/dw myssd = numpy.sum((x * w - ones) ** 2) w -= 0.4 * gw @@ -3959,7 +3960,7 @@ try: tz = eval_outputs([z]) assert False # should have raised exception - except ValueError, e: + except ValueError as e: e0 = exc_message(e) self.assertTrue( # Reported by numpy. 
@@ -4376,8 +4377,8 @@ def test_reshape_long_in_shape(self): v = dvector('v') - r = v.reshape((v.shape[0], 1L)) - print r.eval({v: numpy.arange(5.)}) + r = v.reshape((v.shape[0], 1)) + print(r.eval({v: numpy.arange(5.)})) assert numpy.allclose(r.eval({v: numpy.arange(5.)}).T, numpy.arange(5.)) @@ -5398,8 +5399,8 @@ def ok(z): assert tensor.constant(z).dtype == numpy.asarray(z).dtype - for x in ([2 ** i for i in xrange(63)] + - [0, 0L, 1L, 2L ** 63 - 1] + + for x in ([2 ** i for i in range(63)] + + [0, 0, 1, 2 ** 63 - 1] + [0., 1., 1.1, 1.5]): n_x = numpy.asarray(x) # Make sure the data type is the same as the one found by numpy. @@ -5431,8 +5432,8 @@ # We only consider 'int' and 'long' Python values that can fit # into int64, as that is the maximal integer type that Theano # supports, and that is the maximal type in Python indexing. - for x iRefactoringTool: No changes to ./theano/tensor/tests/mlp_test.py RefactoringTool: No changes to ./theano/tensor/tests/_test_mpi_roundtrip.py RefactoringTool: Refactored ./theano/tensor/subtensor.py n ([2 ** i - 1 for i in xrange(64)] + - [0, 0L, 1L, 2L ** 63 - 1] + + for x in ([2 ** i - 1 for i in range(64)] + + [0, 0, 1, 2 ** 63 - 1] + [0., 1., 1.1, 1.5]): ok(x, floatX) ok(-x, floatX) @@ -5496,10 +5497,10 @@ ('i_scalar', 'i_scalar'), ): - theano_args = map(eval, - ['theano_%s' % c for c in combo]) - numpy_args = map(eval, - ['numpy_%s' % c for c in combo]) + theano_args = list(map(eval, + ['theano_%s' % c for c in combo])) + numpy_args = list(map(eval, + ['numpy_%s' % c for c in combo])) try: theano_dtype = op( theano_args[0](a_type), @@ -5524,7 +5525,7 @@ op(numpy_args[1](b_type), numpy_args[0](a_type)).dtype] numpy_dtype = theano.scalar.upcast( - *map(str, numpy_dtypes)) + *list(map(str, numpy_dtypes))) if numpy_dtype == theano_dtype: # Same data type found, all is good! continue @@ -5598,8 +5599,8 @@ class T_long_tensor(unittest.TestCase): def test_fit_int64(self): - for exp in xrange(64): - val = 2L ** exp - 1 + for exp in range(64): + val = 2 ** exp - 1 scalar_ct = constant(val) assert scalar_ct.dtype.startswith('int') assert scalar_ct.value == val @@ -5613,7 +5614,7 @@ assert numpy.all(matrix_ct.value == val) def test_too_big(self): - val = 2L ** 63 + val = 2 ** 63 #NumPy 1.7 this will raise an exception #NumPy 1.7.1 this will work try: @@ -5640,7 +5641,7 @@ except TypeError: pass - val = 2L ** 64 + val = 2 ** 64 # This fail for all NumPy version. 
self.assertRaises(Exception, constant, val) self.assertRaises(Exception, constant, [val, val]) @@ -5812,7 +5813,7 @@ assert not b.flags.aligned assert numpy.allclose(out_numpy, out_theano) assert False - except TypeError, e: + except TypeError as e: pass a = numpy.empty((), dtype=dtype)['f1'] @@ -5830,7 +5831,7 @@ assert not b.flags.aligned assert numpy.allclose(out_numpy, out_theano) assert False - except TypeError, e: + except TypeError as e: pass @@ -5841,7 +5842,7 @@ try: y = tensor.DimShuffle((False, ), (0, 0))(x) - except ValueError, e: + except ValueError as e: assert str(e).find("may not appear twice") != -1 success = True @@ -6056,7 +6057,7 @@ assert tensor.transpose(tensor.dmatrix()).name is None def test_stacklists(): - a,b,c,d = map(scalar, 'abcd') + a,b,c,d = list(map(scalar, 'abcd')) X = stacklists([[a, b], [c, d]]) f = function([a, b, c, d], X) --- ./theano/tensor/subtensor.py (original) +++ ./theano/tensor/subtensor.py (refactored) @@ -1,5 +1,5 @@ from copy import copy -from itertools import izip + import sys from textwrap import dedent import warnings @@ -355,7 +355,7 @@ # be bigger than what a Python int can hold. # Shapes should always fit in a numpy.int64, and we support them better # 2) In Python3, long replaced int. So we must assert it fit in int64. - elif isinstance(entry, (int, long)): + elif isinstance(entry, int): entry64 =RefactoringTool: No changes to ./theano/tensor/sort.py RefactoringTool: No changes to ./theano/tensor/signal/tests/test_downsample.py RefactoringTool: No changes to ./theano/tensor/signal/tests/test_conv.py RefactoringTool: Refactored ./theano/tensor/signal/downsample.py numpy.int64(entry) return entry64 else: @@ -393,7 +393,7 @@ # infer the broadcasting pattern padded = (idx_list + [slice(None, None, None)] * (x.type.ndim - len(idx_list))) - broadcastable = [bc for p, bc in izip(padded, x.type.broadcastable) + broadcastable = [bc for p, bc in zip(padded, x.type.broadcastable) if isinstance(p, slice)] input_types = Subtensor.collapse(idx_list, @@ -402,7 +402,7 @@ raise IndexError( "Not enough inputs to fill in the Subtensor template.", inputs, idx_list) - for input, expected_type in izip(inputs, input_types): + for input, expected_type in zip(inputs, input_types): if input.type != expected_type: raise TypeError( "Wrong type for Subtensor template. Expected %s, got %s." 
@@ -440,7 +440,7 @@ padded = (actual_idx_list + [slice(None, None, None)] * (len(xshp) - len(self.idx_list))) i = 0 - for idx, xl in izip(padded, xshp): + for idx, xl in zip(padded, xshp): if isinstance(idx, slice): # If it is the default (None, None, None) slice, or a variant, # the shape will be xl @@ -1019,7 +1019,7 @@ destroyhandler_tolerate_aliased=None): if destroyhandler_tolerate_aliased is None: destroyhandler_tolerate_aliased = [] - self.idx_list = map(Subtensor.convert, idx_list) + self.idx_list = list(map(Subtensor.convert, idx_list)) self.inplace = inplace if inplace: self.destroy_map = {0: [0]} @@ -1076,7 +1076,7 @@ y: the value to increment by inputs: TODO WRITEME """ - x, y = map(theano.tensor.as_tensor_variable, [x, y]) + x, y = list(map(theano.tensor.as_tensor_variable, [x, y])) if y.ndim > x.ndim: raise ValueError(("Trying to increment a %d-dimensional " "subtensor with a %d-dimensional value.") % (x.ndim, @@ -1098,7 +1098,7 @@ raise IndexError( "Not enough inputs to fill in the Subtensor template.", inputs, idx_list) - for input, expected_type in izip(inputs, input_types): + for input, expected_type in zip(inputs, input_types): if input.type != expected_type: raise TypeError( "Wrong type for Subtensor template. Expected %s, got %s." @@ -1984,7 +1984,7 @@ if axis < 0: axis += a.ndim assert axis >= 0 - shuffle = range(a.ndim) + shuffle = list(range(a.ndim)) shuffle[0] = axis shuffle[axis] = 0 return advanced_subtensor1( --- ./theano/tensor/signal/downsample.py (original) +++ ./theano/tensor/signal/downsample.py (refactored) @@ -5,7 +5,7 @@ """ #This file should move along with conv.py -import __builtin__ +import builtins import numpy @@ -15,7 +15,7 @@ def max_pool2D(*args, **kwargs): import sys - print >> sys.stderr, "DEPRECATION: max_pool2D renamed to max_pool_2d" + print("DEPRECATION: max_pool2D renamed to max_pool_2d", file=sys.stderr) return max_pool_2d(*args, **kwargs) @@ -160,13 +160,13 @@ x_usable3 = (x.shape[3] // ds1 * ds1) else: x_usable3 = x.shape[3] - for n in xrange(x.shape[0]): - for k in xrange(x.shape[1]): - for i in xrange(x_usable2): + for n in range(x.shape[0]): + for k in range(x.shape[1]): + for i in range(x_usable2): zi = i / ds0 - for j in xrange(x_usable3): + for j in range(x_usable3): zj = j / ds1 - zz[n, k, zi, zj] = __builtin__.max(zz[n, k, zi, zj], + zz[n, k, zi, zj] = builtins.max(zz[n, k, zi, zj], RefactoringTool: No changes to ./theano/tensor/signal/conv.py RefactoringTool: No changes to ./theano/tensor/sharedvar.py RefactoringTool: No changes to ./theano/tensor/shared_randomstreams.py RefactoringTool: Refactored ./theano/tensor/raw_random.py x[n, k, i, j]) def infer_shape(self, node, in_shapes): @@ -296,11 +296,11 @@ shape3 = (x.shape[3] // ds1 * ds1) if not self.ignore_border: shape3 = x.shape[3] - for n in xrange(x.shape[0]): - for k in xrange(x.shape[1]): - for i in xrange(shape2): + for n in range(x.shape[0]): + for k in range(x.shape[1]): + for i in range(shape2): zi = i / ds0 - for j in xrange(shape3): + for j in range(shape3): zj = j / ds1 if (maxout[n,k,zi,zj] == x[n,k,i,j]): gx[n,k,i,j] = gz[n,k,zi,zj] --- ./theano/tensor/raw_random.py (original) +++ ./theano/tensor/raw_random.py (refactored) @@ -11,6 +11,7 @@ from theano.tensor import opt from theano import gof from theano.compile import optdb +from functools import reduce class RandomStateType(gof.Type): @@ -137,7 +138,7 @@ def __setstate__(self, state): self.state = state fn, outtype, inplace, ndim_added = state - if isinstance(fn, basestring): + if isinstance(fn, str): self.fn = 
getattr(numpy.random.RandomState, fn) else: self.fn = fn @@ -182,7 +183,7 @@ assert shape.type.ndim == 1 assert (shape.type.dtype == 'int64') or (shape.type.dtype == 'int32') if not isinstance(r.type, RandomStateType): - print >> sys.stderr, 'WARNING: RandomState instances should be in RandomStateType' + print('WARNING: RandomState instances should be in RandomStateType', file=sys.stderr) if 0: raise TypeError('r must be RandomStateType instance', r) # the following doesn't work because we want to ignore the @@ -191,7 +192,7 @@ # convert args to TensorType instances # and append enough None's to match the length of self.args - args = map(tensor.as_tensor_variable, args) + args = list(map(tensor.as_tensor_variable, args)) return gof.Apply(self, [r, shape] + args, @@ -212,7 +213,7 @@ #Use the default infer_shape implementation. raise tensor.ShapeError() - return [None, [sample_shp[i] for i in xrange(node.outputs[1].ndim)]] + return [None, [sample_shp[i] for i in range(node.outputs[1].ndim)]] def perform(self, node, inputs, out_): rout, out = out_ @@ -402,11 +403,11 @@ # Will contain the return value: a list of indices for each argument ret_indices = [[()] for shape in all_shapes] - for dim in xrange(len(out_shape)): + for dim in range(len(out_shape)): # Temporary list to generate the indices _ret_indices = [[] for shape in all_shapes] - out_range = range(out_shape[dim]) + out_range = list(range(out_shape[dim])) # Verify the shapes are compatible along that dimension # and generate the appropriate range: out_range, or [0, ..., 0] @@ -425,7 +426,7 @@ for prev_index in zip(*ret_indices): for dim_index in zip(*ranges): - for i in xrange(len(all_shapes)): + for i in range(len(all_shapes)): _ret_indices[i].append(prev_index[i] + (dim_index[i],)) ret_indices = _ret_indices @@ -493,7 +494,7 @@ """ if prob is not None: p = prob - print >> sys.stderr, "DEPRECATION WARNING: the parameter prob to the binomal fct have been renamed to p to have the same name as numpy." + print("DEPRECATION WARNING: the parameter prob to the binomal fct have been renamed to p to have the same name as numpy.", file=sys.stderr) n = tensor.as_tensor_variable(n) p = tensor.as_tensor_variable(p) ndim, size, bcast = _infer_ndim_bcast(ndim, size, n, p) @@ -541,7 +542,7 @@ out_size = tuple(size) else: out_size = () - for dim in xrange(out_ndim): + for dim in raRefactoringTool: No changes to ./theano/tensor/randomstreams.py RefactoringTool: No changes to ./theano/tensor/opt_uncanonicalize.py RefactoringTool: Refactored ./theano/tensor/opt.py nge(out_ndim): dim_len = max(low.shape[dim], high.shape[dim]) out_size = out_size + (dim_len,) @@ -713,7 +714,7 @@ size = tuple(size) else: size = () - for dim in xrange(ndim): + for dim in range(ndim): dim_len = max(n.shape[dim], pvals.shape[dim]) size = size + (dim_len,) out_size = size + (pvals.shape[-1],) @@ -844,7 +845,7 @@ """ if prob is not None: p = prob - print >> sys.stderr, "DEPRECATION WARNING: the parameter prob to the binomal fct have been renamed to p to have the same name as numpy." 
+ print("DEPRECATION WARNING: the parameter prob to the binomal fct have been renamed to p to have the same name as numpy.", file=sys.stderr) return self.gen(binomial, size, n, p, ndim=ndim, dtype=dtype) def uniform(self, size=None, low=0.0, high=1.0, ndim=None, dtype=None): --- ./theano/tensor/opt.py (original) +++ ./theano/tensor/opt.py (refactored) @@ -5,13 +5,14 @@ # TODO: 0*x -> 0 import logging +from functools import reduce _logger = logging.getLogger('theano.tensor.opt') import operator import itertools import sys import traceback -from itertools import izip + import numpy import numpy as N # guys... please don't do this in the library :( @@ -148,7 +149,7 @@ rval = T.alloc(T.cast(value, dtype), *new_shape) # the template may have 1s in its shape without being broadcastable if rval.broadcastable != template.broadcastable: - rval = T.unbroadcast(rval, *[i for i in xrange(rval.ndim) + rval = T.unbroadcast(rval, *[i for i in range(rval.ndim) if rval.broadcastable[i] and not template.broadcastable[i]]) assert rval.type.dtype == dtype @@ -227,14 +228,14 @@ isinstance(f, theano.compile.function_module.Supervisor)] protected_inputs = sum(protected_inputs, []) # flatten the list protected_inputs.extend(fgraph.outputs) - candidate_outputs = [i for i in xrange(len(node.outputs)) + candidate_outputs = [i for i in range(len(node.outputs)) if i not in baseline] # node inputs that are Constant, already destroyed, # fgraph protected inputs and fgraph outputs can't be used as inplace # target. # Remove here as faster. - candidate_inputs = [i for i in xrange(len(node.inputs)) - if i not in baseline.values() \ + candidate_inputs = [i for i in range(len(node.inputs)) + if i not in list(baseline.values()) \ and not isinstance(node.inputs[i], Constant)\ and not fgraph.destroyers(node.inputs[i])\ @@ -258,12 +259,12 @@ new_scal = op.scalar_op.make_new_inplace( scalar.transfer_type( *[inplace_pattern.get(i, None) \ - for i in xrange(len(node.outputs))])) + for i in range(len(node.outputs))])) else: new_scal = op.scalar_op.__class__( scalar.transfer_type( *[inplace_pattern.get(i, None) \ - for i in xrange(len(node.outputs))])) + for i in range(len(node.outputs))])) new_outputs = OP(new_scal, inplace_pattern)( *node.inputs, **dict(return_list=True)) new_node = new_outputs[0].owner @@ -276,12 +277,12 @@ fgraph.validate() chk = fgraph.checkpoint() nb_change_no_validate = 0 - except (ValueError, TypeError, InconsistencyError), e: + except (ValueError, TypeError, InconsistencyError) as e: if check_each_change != 1 and not raised_warning: - print >> sys.stderr, ( + print(( "Some inplace optimization was not " - "performed due to unexpected error:") - print >> sys.stderr, e + "performed due to unexpected error:"), file=sys.stderr) + print(e, file=sys.stderr) raised_warning = True fgraph.revert(chk) continue @@ -295,8 +296,8 @@ fgraph.validate() except Exception: if not raised_warning: - print >> sys.stderr, ("Some inplace optimization was not " - "performed due to unexpected error") + print(("Some inplace optimization was not " + "performed due to unexpected error"), file=sys.stderr) fgraph.revert(chk) return inplace_elemwise_optimizer @@ -421,7 +422,7 @@ op.new_order] inplace = op.inplace and inode.op.inplace iinput = inode.inputs[0] - if new_order == range(len(new_order)) and (len(new_order) == + if new_order == list(range(len(new_order))) and (len(new_order) == iinput.type.ndim): return [iinput] else: @@ -532,7 +533,7 @@ return hash(type(self)) ^ hash(self.dtype) def make_node(self, *inputs): - inputs = 
map(T.as_tensor_variable, inputs) + inputs = list(map(T.as_tensor_variable, inputs)) if not all(a.type == inputs[0].type for a in inputs) or ( len(inputs) > 0 and inputs[0].dtype != self.dtype): dtype = theano.scalar.upcast(self.dtype, @@ -735,7 +736,7 @@ def shape_tuple(self, r): """Return a tuple of symbolic shape vars for tensor variable r""" - return tuple([self.shape_ir(i, r) for i in xrange(r.ndim)]) + return tuple([self.shape_ir(i, r) for i in range(r.ndim)]) def default_infer_shape(self, node, i_shapes): """Return a list of shape tuple or None for the outputs of node. @@ -763,7 +764,7 @@ # don't make the optimizer merge a zillion ones together # by always returning the same object to represent 1 return self.lscalar_one - if type(s_i) in (int, long) or isinstance(s_i, numpy.integer): + if type(s_i) in (int, int) or isinstance(s_i, numpy.integer): # this shape is a constant assert s_i >= 0 return T.constant(s_i, dtype='int64') @@ -980,14 +981,14 @@ except ShapeError: o_shapes = self.default_infer_shape(node, [self.shape_of[r] for r in node.inputs]) - except NotImplementedError, e: + except NotImplementedError as e: raise NotImplementedError( 'Code called by infer_shape failed raising a ' 'NotImplementedError. Raising NotImplementedError to ' 'indicate that a shape cannot be computed is no longer ' 'supported, and one should now use tensor.ShapeError ' 'instead. The original exception message is: %s' % e) - except Exception, e: + except Exception as e: msg = ('Failed to infer_shape from Op %s.\nInput shapes: ' '%s\nException encountered during infer_shape: ' '%s\nException message: %s\nTraceback: %s') % ( @@ -1037,7 +1038,7 @@ new_shape += sh[len(new_shape):] o_shapes[sh_idx] = tuple(new_shape) - for r, s in izip(node.outputs, o_shapes): + for r, s in zip(node.outputs, o_shapes): self.set_shape(r, s) def on_change_input(self, fgraph, node, i, r, new_r, reason): @@ -1088,7 +1089,7 @@ self.scheduled[shpnode] = new_r # In case 2, if r is a variable that we've scheduled for shape update, # then we should cancel it. 
- unscheduled = [k for k, v in self.scheduled.items() if v == r] + unscheduled = [k for k, v in list(self.scheduled.items()) if v == r] for k in unscheduled: del self.scheduled[k] @@ -1371,7 +1372,7 @@ out = onames[0] check = [] fail = sub['fail'] - for idx in xrange(len(inames) - 1): + for idx in range(len(inames) - 1): i = inames[idx + 1] dtype = node.inputs[idx + 1].dtype check.append('if(!((npy_%(dtype)s*)PyArray_DATA(%(i)s))[0])' @@ -1485,7 +1486,7 @@ if theano.config.experimental.local_alloc_elemwise_assert: assert_op = assert_(assert_op, *[T.eq(i.shape[idx], cmp_op.shape[idx])\ - for idx in xrange(i.type.ndim) \ + for idx in range(i.type.ndim) \ if not i.type.broadcastable[idx]]) new.append(i.owner.inputs[0]) elif i.owner and isinstance(i.owner.op, T.DimShuffle) \ @@ -1495,7 +1496,7 @@ if theano.config.experimental.local_alloc_elemwise_assert: assert_op = assert_(assert_op, *[T.eq(i.shape[idx], cmp_op.shape[idx]) - for idx in xrange(i.type.ndim) + for idx in range(i.type.ndim) if not i.type.broadcastable[idx]]) new.append(i.owner.inputs[0].owner.inputs[0]) else: @@ -1572,7 +1573,7 @@ return new_inputs.append(T.alloc(T.cast(cval_i, output_dtype), - *[shape_i(d)(i) for d in xrange(i.ndim)])) + *[shape_i(d)(i) for d in range(i.ndim)])) #print >> sys.stderr, "AAA", #*[Shape_i(d)(i) for d in xrange(i.ndim)] except NotScalarConstantError: @@ -1585,17 +1586,17 @@ if new_inputs != node.inputs: rval = [node.op(*new_inputs)] if rval[0].type != node.outputs[0].type: - print >> sys.stderr, "NODE:", node - print >> sys.stderr, "NODE INPUT TYPES:", [i.type for i - in node.inputs] - print >> sys.stderr, "NODE OUTPUT TYPES:", [ - o.type for o in node.outputs] - print >> sys.stderr, "RVAL:", rval - print >> sys.stderr, "NEW INPUT TYPES:", [i.type for i - in new_inputs] - print >> sys.stderr, "RVAL INPUT TYPES:", [ - i.type for i in rval[0].owner.inputs] - print >> sys.stderr, "RVAL TYPES:", [o.type for o in rval] + print("NODE:", node, file=sys.stderr) + print("NODE INPUT TYPES:", [i.type for i + in node.inputs], file=sys.stderr) + print("NODE OUTPUT TYPES:", [ + o.type for o in node.outputs], file=sys.stderr) + print("RVAL:", rval, file=sys.stderr) + print("NEW INPUT TYPES:", [i.type for i + in new_inputs], file=sys.stderr) + print("RVAL INPUT TYPES:", [ + i.type for i in rval[0].owner.inputs], file=sys.stderr) + print("RVAL TYPES:", [o.type for o in rval], file=sys.stderr) assert rval[0].type == node.outputs[0].type, (node, rval[0]) return rval @@ -1747,7 +1748,7 @@ j += 1 # now keep the broadcastable pattern of all # items not appearing in subtensor list - for i in xrange(len(node.op.idx_list), len(u.broadcastable)): + for i in range(len(node.op.idx_list), len(u.broadcastable)): new_axis += [(j, u.broadcastable[i])] j += 1 @@ -2189,14 +2190,14 @@ else: # Keep the flags that modify something new_axis = {} - for dim, bc in node.op.axis.items(): + for dim, bc in list(node.op.axis.items()): if x.broadcastable[dim] != bc: new_axis[dim] = bc if new_axis == node.op.axis: # All flags are useful return else: - return [T.Rebroadcast(*new_axis.items())(x)] + return [T.Rebroadcast(*list(new_axis.items()))(x)] @register_canonicalize @@ -2222,7 +2223,7 @@ # by the `unbroadcast` function before we are in the actual function # compilation phase. 
if hasattr(input, 'clients') and len(input.clients) == 1: - rval = inode.op.make_node(T.Rebroadcast(*op.axis.items())( + rval = inode.op.make_node(T.Rebroadcast(*list(op.axis.items()))( inode.inputs[0])).outputs return rval if inode and isinstance(inode.op, T.Rebroadcast): @@ -2231,7 +2232,7 @@ axis = inode.op.axis.copy() axis.update(op.axis) iinput = inode.inputs[0] - rval = [T.Rebroadcast(*axis.items())(iinput)] + rval = [T.Rebroadcast(*list(axis.items()))(iinput)] return rval @@ -2311,7 +2312,7 @@ if out.type.broadcastable != node.outputs[0].type.broadcastable: # We need to copy data to the new dimensions during execution out = T.alloc(out, *[node.outputs[0].shape[i] for i - in xrange(out.ndim)]) + in range(out.ndim)]) return [out] return False @@ -2510,10 +2511,10 @@ def tmp(thing): try: return T.get_scalar_constant_value(thing) - except (TypeError, ValueError), e: - print e, thing.owner.inputs[0] + except (TypeError, ValueError) as e: + print(e, thing.owner.inputs[0]) return None - print 'LOCAL SUM EMPTY', [tmp(s) for s in y_shape] + print('LOCAL SUM EMPTY', [tmp(s) for s in y_shape]) ################## # Middleman cuts # @@ -2768,8 +2769,8 @@ # then num is concat(numx, numy) and denum is # concat(denumx, denumy) note that main() can have any # number of arguments >= 0 concat is list concatenation - num = reduce(list.__iadd__, map(operator.itemgetter(0), pairs)) - denum = reduce(list.__iadd__, map(operator.itemgetter(1), pairs)) + num = reduce(list.__iadd__, list(map(operator.itemgetter(0), pairs))) + denum = reduce(list.__iadd__, list(map(operator.itemgetter(1), pairs))) elif parent.op == self.inverse: # If we have inverse(x, y), numx, denumx, numy and denumy # then num is concat(numx, denumy) and denum is @@ -3115,7 +3116,7 @@ if isinstance(node.op, T.Sum): axis = node.op.axis if axis is None: - axis = range(node.inputs[0].ndim) + axis = list(range(node.inputs[0].ndim)) #print 'axis =', axis thing_summed = node.inputs[0] if thing_summed.owner and thing_summed.owner.op == T.true_div: @@ -3242,13 +3243,13 @@ # The old bugged logic. We keep it there to generate a RefactoringTool: Refactored ./theano/tensor/nnet/tests/test_sigm.py warning # when we generated bad code. 
- alldims = range(summed.owner.inputs[0].type.ndim) + alldims = list(range(summed.owner.inputs[0].type.ndim)) alldims = [d for i, d in enumerate(alldims) if i in summed.owner.op.axis] alldims = [d for i, d in enumerate(alldims) if i in node.op.axis] newaxis_old = [i for i in - xrange(summed.owner.inputs[0].type.ndim) + range(summed.owner.inputs[0].type.ndim) if i not in alldims] if (theano.config.warn.sum_sum_bug and @@ -3349,12 +3350,12 @@ val = get_scalar_constant_value(input) assert val.size == 1 val = val.reshape(1)[0] - to_prod = [shapes[i] for i in xrange(len(shapes)) + to_prod = [shapes[i] for i in range(len(shapes)) if i in node.op.axis] if to_prod: val *= T.mul(*to_prod) return [T.alloc(T.cast(val, dtype=node.outputs[0].dtype), - *[shapes[i] for i in xrange(len(shapes)) + *[shapes[i] for i in range(len(shapes)) if i not in node.op.axis])] except NotScalarConstantError: pass @@ -3556,7 +3557,7 @@ pow2 = [xsym] pow2_scal = [theano.scalar.Scalar(xsym.dtype)()] y_to_do = abs(y) - for i in xrange(int(numpy.log2(y_to_do))): + for i in range(int(numpy.log2(y_to_do))): pow2.append(T.sqr(pow2[i])) pow2_scal.append(theano.scalar.sqr(pow2_scal[i])) rval1 = None @@ -3884,8 +3885,8 @@ pos, neg = local_add_canonizer.get_num_denum(factor) if len(pos) == 1 and not neg: return False, factor, num, denum - pos_pairs = map(local_mul_canonizer.get_num_denum, pos) - neg_pairs = map(local_mul_canonizer.get_num_denum, neg) + pos_pairs = list(map(local_mul_canonizer.get_num_denum, pos)) + neg_pairs = list(map(local_mul_canonizer.get_num_denum, neg)) change = False for n in list(num): success, pos_pairs, neg_pairs = distribute_greedy(pos_pairs, @@ -4258,7 +4259,7 @@ mul_inputs = check_input(mul_neg.owner.inputs) # Put the constant first. - for i in xrange(len(mul_inputs)): + for i in range(len(mul_inputs)): if isinstance(i, Constant): if i == 0: break @@ -4753,7 +4754,7 @@ assert len(new_outputs) == len(node.outputs) try: fgraph.replace_all_validate( - zip(node.outputs, new_outputs), + list(zip(node.outputs, new_outputs)), reason=self.__class__.__name__) did_something = True except InconsistencyError: --- ./theano/tensor/nnet/tests/test_sigm.py (original) +++ ./theano/tensor/nnet/tests/test_sigm.py (refactored) @@ -1,5 +1,5 @@ import unittest -from itertools import imap + import numpy @@ -258,11 +258,11 @@ compute_mul(trees[0]), compute_mul(trees[1])) if not good: - print trees[0] - print trees[1] - print '***' + print(trees[0]) + print(trees[1]) + print('***') theano.printing.debugprint(compute_mul(trees[0])) - print '***' + print('***') theano.printing.debugprint(compute_mul(trees[1])) assert good ok(sigmoid(x) * exp(-x), sigmoid(-x)) @@ -440,7 +440,7 @@ exp =RefactoringTool: Refactored ./theano/tensor/nnet/tests/test_nnet.py RefactoringTool: Refactored ./theano/tensor/nnet/tests/test_conv3d2d.py RefactoringTool: Refactored ./theano/tensor/nnet/tests/test_conv3d.py tensor.exp assert is_1pexp(1 + exp(x)) == (False, x) assert is_1pexp(exp(x) + 1) == (False, x) - for neg, exp_arg in imap(is_1pexp, [(1 + exp(-x)), (exp(-x) + 1)]): + for neg, exp_arg in map(is_1pexp, [(1 + exp(-x)), (exp(-x) + 1)]): assert not neg and theano.gof.graph.is_same_graph(exp_arg, -x) assert is_1pexp(1 - exp(x)) is None assert is_1pexp(2 + exp(x)) is None --- ./theano/tensor/nnet/tests/test_nnet.py (original) +++ ./theano/tensor/nnet/tests/test_nnet.py (refactored) @@ -1214,11 +1214,11 @@ tensor.grad(c.sum(), x)], mode='FAST_RUN') if 0: for i, n in enumerate(f.maker.fgraph.toposort()): - print i, n + print(i, n) xval = 
numpy.zeros((5, 5), dtype=dtype).astype(dtype) x2val = numpy.zeros(5, dtype=xval.dtype).astype(dtype) - for i in xrange(100): + for i in range(100): cval, gxval = f(xval, numpy.arange(5), x2val) xval -= 100.3 * gxval #print cval, gxval @@ -1228,7 +1228,7 @@ xval = numpy.zeros((5, 5), dtype=dtype) x2val = numpy.zeros(5, dtype=xval.dtype) - for i in xrange(100): + for i in range(100): cval, gxval = f(xval, numpy.arange(5), x2val) xval += 100000.3 * gxval --- ./theano/tensor/nnet/tests/test_conv3d2d.py (original) +++ ./theano/tensor/nnet/tests/test_conv3d2d.py (refactored) @@ -59,9 +59,9 @@ Wf2 = Wf//2 rval = numpy.zeros((Ns, Ts-Tf+1, Nf, Hs-Hf+1, Ws-Wf+1)) - for ns in xrange(Ns): - for nf in xrange(Nf): - for c in xrange(C): + for ns in range(Ns): + for nf in range(Nf): + for c in range(C): s_i = signals[ns,:,c,:,:] f_i = filters[nf,:,c,:,:] r_i = rval[ns, :, nf, :, :] @@ -83,7 +83,7 @@ t0 = time.time() pyres = pyconv3d(signals, filters) - print time.time() - t0 + print(time.time() - t0) s_signals = shared(signals) s_filters = shared(filters) @@ -99,7 +99,7 @@ t0 = time.time() newconv3d() - print time.time() - t0 + print(time.time() - t0) utt.assert_allclose(pyres, s_output.get_value(borrow=True)) gsignals, gfilters = theano.grad(out.sum(), [s_signals, s_filters]) gnewconv3d = theano.function([], [], @@ -110,7 +110,7 @@ t0 = time.time() gnewconv3d() - print 'grad', time.time() - t0 + print('grad', time.time() - t0) Ns, Ts, C, Hs, Ws = 3, 3, 3, 5, 5 Nf, Tf, C, Hf, Wf = 4, 2, 3, 2, 2 --- ./theano/tensor/nnet/tests/test_conv3d.py (original) +++ ./theano/tensor/nnet/tests/test_conv3d.py (refactored) @@ -266,11 +266,11 @@ W_mat = N.zeros((n, numFilters)) V_mat = N.zeros((batchSize, n)) Hv_mat = N.zeros((batchSize, numFilters)) - for qi in xrange(0, numFilters): + for qi in range(0, numFilters): W_mat[:, qi] = \ self.W.get_value(borrow=True)[qi, :, :, :, :].reshape((n)) Hv_mat[:, qi] = Hv[:, 0, 0, 0, qi] - for qi in xrange(0, batchSize): + for qi in range(0, batchSize): V_mat[qi, :] = \ self.V.get_value(borrow=True)[qi, :, :, :, :].reshape((n)) @@ -281,12 +281,12 @@ tol = 1e-4 if N.abs(H_mat - Hv_mat).max() > tol and not N.allclose(H_mat, Hv_mat): - print H_mat - print Hv_mat - print 'max error: ' + str(N.abs(H_mat - Hv_mat).max()) + print(H_mat) + print(Hv_mat) + print('max error: ' + str(N.abs(H_mat - Hv_mat).max())) W.get_value(borrow=True)[W.get_value(borrow=True) != 0] += 1.0 - print 'min non-zero kernel mag: ' + \ - str(N.abs(W.get_value(borrow=True)).min()) + print('min non-zero kernel mag: ' + \ + str(N.abs(W.get_value(borrow=True)).min())) assert False def test_c_against_mat_transp_mul(self): @@ -332,31 +332,31 @@ n = inputChannels * videoHeight * videoWidth * videoDur rbim = N.zeros((videoHeight, videoWidth, videoDur, inputChannels)) - for qi in xrange(0, inputChannels): + for qi in range(0, inputChannels): rbim[:, :, :, qi] = self.rb.get_value(borrow=True)[qi] rbv = rbim.reshape((n)) W_mat = N.zeros((numFilters, n)) Vv_mat = N.zeros((n, batchSize)) Hv_mat = N.zeros((numFilters, batchSize)) - for qi in xrange(0, numFilters): + for qi in range(0, numFilters): W_mat[qi, :] = \ self.W.get_value(borrow=True)[qi, :, :, :, :].reshape((n)) Hv_mat[qi, :] = Hv[:, 0, 0, 0, qi] - for qi in xrange(0, batchSize): + for qi in range(0, batchSize): Vv_mat[:, qi] = Vv[qi, :, :, :, :].reshape((n)) V_mat = (N.dot(W_mat.transpose(), Hv_mat).transpose() + \ rbv).transpose() if N.abs(V_mat - Vv_mat).max() > 1e-5: - print V_mat - print Vv_mat - - for qq in xrange(V_mat.shape[0]): - for qqq in 
xrange(Vv_mat.shape[1]): + print(V_mat) + print(Vv_mat) + + for qq in range(V_mat.shape[0]): + for qqq in range(Vv_mat.shape[1]): if abs(V_mat[qq, qqq] - Vv_mat[qq, qqq]) > 1e-5: - print ('wrong at ' + str((qq, qqq)) + ': ' + - str(V_mat[qq, qqq], Vv_mat[qq, qqq])) + print(('wrong at ' + str((qq, qqq)) + ': ' + + str(V_mat[qq, qqq], Vv_mat[qq, qqq]))) assert False def test_c_against_sparse_mat_transp_mul(self): @@ -413,13 +413,13 @@ c = N.zeros(H_shape[1:]) t = N.zeros(H_shape[1:]) - for qi in xrange(0, H_shape[4]): + for qi in range(0, H_shape[4]): h[:, :, :, qi] = qi - for qi in xrange(0, H_shape[1]): + for qi in range(0, H_shape[1]): r[qi, :, :, :] = qi - for qi in xrange(0, H_shape[2]): + for qi in range(0, H_shape[2]): c[:, qi, :, :] = qi - for qi in xrange(0, H_shape[3]): + for qi in range(0, H_shape[3]): t[:, :, qi, :] = qi hn = H_shape[1] * H_shape[2] * H_shape[3] * H_shape[4] @@ -435,14 +435,14 @@ n = inputChannels * videoHeight * videoWidth * videoDur rbim = N.zeros((videoHeight, videoWidth, videoDur, inputChannels)) - for qi in xrange(0, inputChannels): + for qi in range(0, inputChannels): rbim[:, :, :, qi] = self.rb.get_value(borrow=True)[qi] rbv = rbim.reshape((n)) W_mat = N.zeros((hn, n)) Vv_mat = N.zeros((n, batchSize)) Hv_mat = N.zeros((hn, batchSize)) - for qi in xrange(0, hn): + for qi in range(0, hn): hi = h[qi] ri = r[qi] ci = c[qi] @@ -458,7 +458,7 @@ W_mat[qi, :] = placed_filter.reshape((n)) Hv_mat[qi, :] = Hv[:, ri, ci, ti, hi] - for qi in xrange(0, batchSize): + for qi in range(0, batchSize): Vv_mat[:, qi] = Vv[qi, :, :, :, :].reshape((n)) W_mat_T = sparse.csr_matrix(W_mat.transpose()) @@ -467,15 +467,15 @@ V_mat = (temp.transpose() + rbv).transpose() if N.abs(V_mat - Vv_mat).max() > 1e-5: - print 'mul' - print V_mat - print 'conv' - print Vv_mat - for i in xrange(0, n): - for j in xrange(0, batchSize): + print('mul') + print(V_mat) + print('conv') + print(Vv_mat) + for i in range(0, n): + for j in range(0, batchSize): if abs(V_mat[i, j] - Vv_mat[i, j]) > 1e-5: - print ('wrong at %d,%d: %f mul versus %f conv' - % (i, j, V_mat[i, j], Vv_mat[i, j])) + print(('wrong at %d,%d: %f mul versuRefactoringTool: Refactored ./theano/tensor/nnet/tests/test_conv.py RefactoringTool: Refactored ./theano/tensor/nnet/tests/speed_test_conv.py s %f conv' + % (i, j, V_mat[i, j], Vv_mat[i, j]))) assert False def test_infer_shape(self): --- ./theano/tensor/nnet/tests/test_conv.py (original) +++ ./theano/tensor/nnet/tests/test_conv.py (refactored) @@ -403,21 +403,21 @@ def speed(self): n_calls = 20000 - print "n_calls", n_calls + print("n_calls", n_calls) for border_mode in ['valid', 'full']: - print - print border_mode + print() + print(border_mode) for openmp in [False, True]: - print "OpenMP", openmp + print("OpenMP", openmp) image_shapes = [(1, 5, 6, 6), (10, 5, 6, 6), #(10, 10, 16, 16), #(10, 10, 32, 32) ] - print "image_shape", image_shapes + print("image_shape", image_shapes) for image_shape in image_shapes: filter_shapes = [(1, 5, 4, 4), (2, 5, 4, 4), (5, 5, 4, 4)] - print "filter_shapes", filter_shapes + print("filter_shapes", filter_shapes) for filter_shape in filter_shapes: input = theano.shared(numpy.random.random(image_shape)) @@ -435,8 +435,8 @@ t1 = time.time() theano_conv.fn(n_calls=n_calls) t2 = time.time() - print t2 - t1, - print + print(t2 - t1, end=' ') + print() def test_infer_shape(self): # Note: infer_shape is incomplete and thus input and filter shapes --- ./theano/tensor/nnet/tests/speed_test_conv.py (original) +++ ./theano/tensor/nnet/tests/speed_test_conv.py 
(refactored) @@ -17,14 +17,14 @@ it = reversed(kern) for i in range(kshp[0]): for j in range(kshp[1]): - flip[i,j] = it.next() + flip[i,j] = next(it) elif len(kern.shape)==3: kern=kern.reshape(kern.shape[0],-1) for k in range(kern.shape[0]): it = reversed(kern[k,:]) for i in range(kshp[0]): for j in range(kshp[1]): - flip[k,i,j] = it.next() + flip[k,i,j] = next(it) elif len(kern.shape)==4: kern=kern.reshape(kern.shape[0],kern.shape[1],-1) for k in range(kern.shape[0]): @@ -32,7 +32,7 @@ it = reversed(kern[k,m,:]) for i in range(kshp[0]): for j in range(kshp[1]): - flip[k,m,i,j] = it.next() + flip[k,m,i,j] = next(it) else: raise NotImplementedError() @@ -60,11 +60,11 @@ tctot=0 tpytot=0 - for kshp, kern, nkern, n_layer in zip(kshps, kerns, nkerns, range(len(nkerns))): + for kshp, kern, nkern, n_layer in zip(kshps, kerns, nkerns, list(range(len(nkerns)))): if do_print: - print '************* layer %i ***************' % n_layer + print('************* layer %i ***************' % n_layer) - print conv_mode, ss, n_layer, kshp, nkern + print(conv_mode, ss, n_layer, kshp, nkern) # actual values w = global_rng.random_sample(N.r_[nkern,imshp[0],kshp]) @@ -154,11 +154,11 @@ tctot=0 tpytot=0 - for kshp, kern, nkern, n_layer in zip(kshps, kerns, nkerns, range(len(nkerns))): + for kshp, kern, nkern, n_layer in zip(kshps, kerns, nkerns, list(range(len(nkerns)))): if do_print: - print '************* layer %i ***************' % n_layer + print('************* layer %i ***************' % n_layer) - print conv_mode, ss, n_layer, kshp, nkern + print(conv_mode, ss, n_layer, kshp, nkern) # actual values w = global_rng.random_sample(N.r_[nkern,imshp[0],kshp]) @@ -224,17 +224,17 @@ t_b_k=[] #calculate the timing with unrolling - print 'time unroll batch kern' + print('time unroll batch kern') best=[] worst=[] t_=[] - for unroll_b, n_b in zip(unroll_batch,range(len(unroll_batch))): - for unroll_k, n_k in zip(unroll_kern,range(len(unroll_kern))): + for unroll_b, n_b in zip(unroll_batch,list(range(len(unroll_batch)))): + for unroll_k, n_k in zip(unroll_kern,list(range(len(unroll_kern)))): t_b_k.append(str(unroll_b)+"/"+str(unroll_k)) if not t_: tctot, tpytot, ntot=[],[],[] - for conv_mode, n_mode in zip(convmodes,range(len(convmodes))): - for ss, n_ss in zip(ssizes,range(len(ssizes))): + for conv_mode, n_mode in zip(convmodes,list(range(len(convmodes)))): + for ss, n_ss in zip(ssizes,list(range(len(ssizes)))): # tctot_, tpytot_, ntot_ = exec_multilayer_conv_nnet_old(conv_mode, ss, bsize, imshp_start, kshps, nkerns, unroll_batch=unroll_b, unroll_kern=unroll_k, validate=validate, verbose=verbose,do_print=False) tctot_, tpytot_, ntot_ = exec_multilayer_conv_nnet(conv_mode, ss, bsize, imshp_start, kshps, nkerns, unroll_batch=unroll_b, unroll_kern=unroll_k, verbose=verbose,do_print=False, repeat=repeat) tctot+=[tctot_] @@ -253,39 +253,39 @@ t=t_ t=N.asarray(t) #calculate the old timing - print 'time old version' + print('time old version') tctot,tpytot,ntot=[],[],[] tctot_=[] if not tctot_: - for conv_mode, n_mode in zip(convmodes,range(len(convmodes))): - for ss, n_ss in zip(ssizes,range(len(ssizes))): + for conv_mode, n_mode in zip(convmodes,list(range(len(convmodes)))): + for ss, n_ss in zip(ssizes,list(range(len(ssizes)))): # tctot_, tpytot_, ntot_ = exec_multilayer_conv_nnet_old(conv_mode, ss, bsize, imshp_start, kshps, nkerns, unroll_batch=0, unroll_kern=0, validate=validate, verbose=verbose,do_print=False) tctot_, tpytot_, ntot_ = exec_multilayer_conv_nnet(conv_mode, ss, bsize, imshp_start, kshps, nkerns, 
unroll_batch=0, unroll_kern=0, verbose=verbose,do_print=False, repeat=repeat) tctot+=[tctot_] tpytot+=[tpytot_] ntot+=[ntot_] else: tctot=N.asarray(tctot_) - print "old code timing %.3fs"%sum(tctot),tctot + print("old code timing %.3fs"%sum(tctot),tctot) best=N.asarray(best) worst=N.asarray(worst) - print "timing for unrolled version" - print "unroll_batch/unroll_kern valid_mode full_mode" + print("timing for unrolled version") + print("unroll_batch/unroll_kern valid_mode full_mode") for n_b in range(len(unroll_batch)): for n_k in range(len(unroll_kern)): - print (unroll_batch[n_b], unroll_kern[n_k]) + tuple(t[n_b,n_k]),',' + print((unroll_batch[n_b], unroll_kern[n_k]) + tuple(t[n_b,n_k]),',') t_detail=t t = t.sum(axis=2) - print "max %.3fs"%t.max(), "max param(batch unloop size/kernel unloop size)", t_b_k[t.argmax()] - print "min %.3fs"%t.min(), "min param(batch unloop size/kernel unloop size)", t_b_k[t.argmin()] - print "speedup vs (1/1)%.3fx, vs old %.3fx"% (t.max()/t.min(),sum(tctot)/t.min()) - print worst/best,tctot/best + print("max %.3fs"%t.max(), "max param(batch unloop size/kernel unloop size)", t_b_k[t.argmax()]) + print("min %.3fs"%t.min(), "min param(batch unloop size/kernel unloop size)", t_b_k[t.argmin()]) + print("speedup vs (1/1)%.3fx, vs old %.3fx"% (t.max()/t.min(),sum(tctot)/t.min())) + print(worst/best,tctot/best) #calculate the timing oRefactoringTool: Refactored ./theano/tensor/nnet/sigm.py f unroll_patch - print 'time unroll_patch' + print('time unroll_patch') tctot_patch = [] tctot_patch_size = [] - for conv_mode, n_mode in zip(convmodes,range(len(convmodes))): - for ss, n_ss in zip(ssizes,range(len(ssizes))): + for conv_mode, n_mode in zip(convmodes,list(range(len(convmodes)))): + for ss, n_ss in zip(ssizes,list(range(len(ssizes)))): #tctot_, tpytot_, ntot_ = exec_multilayer_conv_nnet_old(conv_mode, ss, bsize, imshp_start, kshps, nkerns, unroll_batch=0, unroll_kern=0, validate=validate,unroll_patch=True,verbose=verbose,do_print=False) tctot_, tpytot_, ntot_ = exec_multilayer_conv_nnet(conv_mode, ss, bsize, imshp_start, kshps, nkerns, unroll_batch=0, unroll_kern=0, unroll_patch=True,verbose=verbose,do_print=False, repeat=repeat) tctot_patch += [tctot_] @@ -294,13 +294,13 @@ tctot_patch_size += [tctot_] t_patch=sum(tctot_patch) - print "unroll_patch without shape time", tctot_patch - print "speedup vs (1/1)%.3fx, vs old %.3fx"% (t.max()/t_patch,sum(tctot)/t_patch) - print best/tctot_patch, worst/tctot_patch + print("unroll_patch without shape time", tctot_patch) + print("speedup vs (1/1)%.3fx, vs old %.3fx"% (t.max()/t_patch,sum(tctot)/t_patch)) + print(best/tctot_patch, worst/tctot_patch) t_patch_size=sum(tctot_patch_size) - print "unroll_patch with shape time", tctot_patch_size - print "speedup vs (1/1)%.3fx, vs old %.3fx"% (t.max()/t_patch_size,sum(tctot)/t_patch_size) - print best/tctot_patch_size, worst/tctot_patch_size + print("unroll_patch with shape time", tctot_patch_size) + print("speedup vs (1/1)%.3fx, vs old %.3fx"% (t.max()/t_patch_size,sum(tctot)/t_patch_size)) + print(best/tctot_patch_size, worst/tctot_patch_size) return --- ./theano/tensor/nnet/sigm.py (original) +++ ./theano/tensor/nnet/sigm.py (refactored) @@ -4,7 +4,7 @@ """ import warnings -from itertools import imap + import numpy @@ -143,9 +143,9 @@ 'doc', 'library', 'tensor', 'nnet', 'sigmoid_prec.png') plt.savefig(fname) - print "New picture saved at", fname - print val_ultra.max() - print val_ultra.min() + print("New picture saved at", fname) + print(val_ultra.max()) + print(val_ultra.min()) 
scalar_sigmoid = ScalarSigmoid(scalar.upgrade_to_float, name='scalar_sigmoid') @@ -589,7 +589,7 @@ return [not neg, sub_tree] else: # Recurse into inputs. - return [False, map(parse_mul_tree, mul_info)] + return [False, list(map(parse_mul_tree, mul_info))] def replace_leaf(arg, leaves, new_leaves, op, neg): @@ -643,7 +643,7 @@ if isinstance(inputs, list): # Recurse through inputs. s_inputs = [] - for s_i in imap(simplify_mul, inputs): + for s_i in map(simplify_mul, inputs): if s_i[1] is None: # Multiplication by +/-1. neg ^= s_i[0] @@ -683,7 +683,7 @@ 'call `simplify_mul` on the tree first?') elif isinstance(inputs, list): # Recurse through inputs. - rval = tensor.mul(*map(compute_mul, inputs)) + rval = tensor.mul(*list(map(compute_mul, inputs))) else: rval = inputs if neg: @@ -739,13 +739,13 @@ if full_tree is None: full_tree = tree if False: # Debug code. - print '' - print ' full_tree = %s' % full_tree - print ' tree = %s' % tree - print ' exp_x = %s' % exp_x - print ' exp_minus_x = %s' % exp_minus_x - print ' sigm_x = %s' % sigm_x - print ' sigm_minus_x= %s' % sigm_minus_x + print('') + print(' full_tree = %s' % full_tree) + print(' tree = %s' % tree) + print(' exp_x = %s' % exp_x) + RefactoringTool: Refactored ./theano/tensor/nnet/nnet.py RefactoringTool: No changes to ./theano/tensor/nnet/conv3d2d.py RefactoringTool: Refactored ./theano/tensor/nnet/conv.py print(' exp_minus_x = %s' % exp_minus_x) + print(' sigm_x = %s' % sigm_x) + print(' sigm_minus_x= %s' % sigm_minus_x) neg, inputs = tree if isinstance(inputs, list): # Recurse through inputs of the multiplication. @@ -866,7 +866,7 @@ if sub_r.owner and sub_r.owner.op == sigmoid: try: val_l = opt.get_scalar_constant_value(sub_l) - except Exception, e: + except Exception as e: return if numpy.allclose(numpy.sum(val_l), 1): return [sigmoid(-sub_r.owner.inputs[0])] @@ -891,8 +891,8 @@ @opt.register_stabilize @gof.local_optimizer([tensor.mul]) def local_sigm_gest(node): - print "CANONICALIZE" - print sigm_canonicalize(node) + print("CANONICALIZE") + print(sigm_canonicalize(node)) def sigm_canonicalize(node): add = tensor.add --- ./theano/tensor/nnet/nnet.py (original) +++ ./theano/tensor/nnet/nnet.py (refactored) @@ -70,7 +70,7 @@ raise ValueError('b must have same number of columns as x') sm = numpy.zeros_like(x) - for i in xrange(sm.shape[0]): + for i in range(sm.shape[0]): row = x[i] + b sm[i] = numpy.exp(row - numpy.max(row)) sm[i] *= 1.0 / numpy.sum(sm[i]) @@ -246,7 +246,7 @@ dy, sm = input_storage dx = numpy.zeros_like(sm) #dx[i,j] = - (\sum_k dy[i,k] sm[i,k]) sm[i,j] + dy[i,j] sm[i,j] - for i in xrange(sm.shape[0]): + for i in range(sm.shape[0]): dy_times_sm_i = dy[i] * sm[i] dx[i] = dy_times_sm_i - sum(dy_times_sm_i) * sm[i] output_storage[0][0] = dx @@ -787,7 +787,7 @@ nll = numpy.zeros(x.shape[0], dtype=node.outputs[0].type. 
dtype) # nll(y | softmax(x))
am = numpy.zeros_like(y_idx)
- for i in xrange(sm.shape[0]):
+ for i in range(sm.shape[0]):
#add the bias vector to the i'th row of x
row = x[i] + b
@@ -985,7 +985,7 @@
def perform(self, node, input_storage, output_storage):
dy, sm, y_idx = input_storage
dx = numpy.zeros_like(sm)
- for i in xrange(sm.shape[0]):
+ for i in range(sm.shape[0]):
dx[i] = dy[i] * sm[i] # vector scale
dx[i, y_idx[i]] -= dy[i] # scalar decrement
output_storage[0][0] = dx
@@ -1155,7 +1155,7 @@
g_y, coding_dist, true_one_of_n = inp
g_coding_strg, = out
g_coding = numpy.zeros_like(coding_dist)
- for i in xrange(len(g_y)):
+ for i in range(len(g_y)):
g_coding[i, true_one_of_n[i]] = -g_y[i] / coding_dist[i, true_one_of_n[i]]
g_coding_strg[0] = g_coding
@@ -1217,7 +1217,7 @@
coding, one_of_n = inp
y_out, = out
y = numpy.zeros_like(coding[:, 0])
- for i in xrange(len(y)):
+ for i in range(len(y)):
y[i] = -numpy.log(coding[i, one_of_n[i]])
y_out[0] = y
--- ./theano/tensor/nnet/conv.py (original)
+++ ./theano/tensor/nnet/conv.py (refactored)
@@ -94,12 +94,12 @@
#accept Constant value for image_shape and filter_shape.
if image_shape is not None:
image_shape = list(image_shape)
- for i in xrange(len(image_shape)):
+ for i in range(len(image_shape)):
if image_shape[i] is not None:
try:
image_shape[i] = get_scalar_constant_value(
as_tensor_variable(image_shape[i]))
- except NotScalarConstantError, e:
+ except NotScalarConstantError as e:
raise NotScalarConstantError(
"The convolution need that the shape"
" information are constant values. We got"
@@ -109,12 +109,12 @@
image_shape[i] = int(image_shape[i])
if filter_shape is not None:
filter_shape = list(filter_shape)
RefactoringTool: Refactored ./theano/tensor/nnet/__init__.py
RefactoringTool: Refactored ./theano/tensor/nnet/ConvTransp3D.py
- for i in xrange(len(filter_shape)):
+ for i in range(len(filter_shape)):
if filter_shape[i] is not None:
try:
filter_shape[i] = get_scalar_constant_value(
as_tensor_variable(filter_shape[i]))
- except NotScalarConstantError, e:
+ except NotScalarConstantError as e:
raise NotScalarConstantError(
"The convolution need that the shape"
" information are constant values. We got"
@@ -127,7 +127,7 @@
try:
assert image_shape[1] == filter_shape[1]
except Exception:
- print 'image ', image_shape, ' filters ', filter_shape
+ print('image ', image_shape, ' filters ', filter_shape)
raise
if filter_shape is not None:
@@ -514,7 +514,7 @@
time_unroll_patch = self.speed_unroll_patch_noshape[ mode_idx]
time_unroll_batch_kern = 9999999
- for i in xrange(len(self.speed_unroll_batch_kern)):
+ for i in range(len(self.speed_unroll_batch_kern)):
if (bsize % self.speed_unroll_batch_kern[i][0] == 0 and
nkern % self.speed_unroll_batch_kern[i][1] == 0):
if self.speed_unroll_batch_kern[i][2 + mode_idx] < time_unroll_batch_kern:
@@ -729,10 +729,10 @@
val = _valfrommode(self.out_mode)
bval = _bvalfromboundary('fill')
- for b in xrange(bsize):
- for n in xrange(nkern):
+ for b in range(bsize):
+ for n in range(nkern):
zz[b, n, ...].fill(0)
- for im0 in xrange(stacklen):
+ for im0 in range(stacklen):
zz[b, n, ...]
+= _convolve2d(img2d[b, im0, ...], filtersflipped[n, im0, ...], 1, val, bval, 0) @@ -747,12 +747,12 @@ img2d = img2d2 #N_image_shape = image_data.shape - for b in xrange(bsize): - for n in xrange(nkern): + for b in range(bsize): + for n in range(nkern): zz[b, n, ...].fill(0) - for im0 in xrange(stacklen): - for row in xrange(0, zz.shape[2], self.dx): - for col in xrange(0, zz.shape[3], self.dy): + for im0 in range(stacklen): + for row in range(0, zz.shape[2], self.dx): + for col in range(0, zz.shape[3], self.dy): zz[b, n, row, col] += (img2d[b, im0, row:row + kshp[0], col:col + kshp[1]] * filtersflipped[n, im0, ::-1, ::-1]).sum() @@ -1744,7 +1744,7 @@ def my_dup(st, size): s = "" - for i in xrange(size): + for i in range(size): d["unroll_iter"] = i s += st % d return s + "\n" @@ -1752,9 +1752,9 @@ def my_dup2(st): s = "" iter = 0 - for i in xrange(unroll_bsize): + for i in range(unroll_bsize): d["unroll_biter"] = i - for j in xrange(unroll_ksize): + for j in range(unroll_ksize): d["unroll_kiter"] = j d["unroll_iter"] = iter iter += 1 --- ./theano/tensor/nnet/__init__.py (original) +++ ./theano/tensor/nnet/__init__.py (refactored) @@ -1,8 +1,8 @@ -from nnet import * -from conv import conv2d, ConvOp -from Conv3D import * -from ConvGrad3D import * -from ConvTransp3D import * -from sigm import (softplus, sigmoid, sigmoid_inplace, +from .nnet import * +from .conv import conv2d, ConvOp +from .Conv3D import * +from .ConvGrad3D import * +from .ConvTransp3D import * +from .sigm import (softplus, sigmoid, sigmoid_inplace, scalar_sigmoid, ultra_fast_sigmoid, hard_sigmoid) --- ./RefactoringTool: Refactored ./theano/tensor/nnet/ConvGrad3D.py RefactoringTool: Refactored ./theano/tensor/nnet/Conv3D.py theano/tensor/nnet/ConvTransp3D.py (original) +++ ./theano/tensor/nnet/ConvTransp3D.py (refactored) @@ -356,7 +356,7 @@ if Rshape is not None and Rshape[0] != -1: if Rshape[0] < videoHeight: - print (Rshape[0], videoHeight) + print((Rshape[0], videoHeight)) assert False assert Rshape[1] >= videoWidth assert Rshape[2] >= videoDur @@ -373,14 +373,14 @@ videoWidth, videoDur, inputChannels), dtype=H.dtype) #R[i,j,r,c,t] = b_j + sum_{rc,rk | d \circ rc + rk = r} sum_{cc,ck | ...} sum_{tc,tk | ...} sum_k W[k, j, rk, ck, tk] * H[i,k,rc,cc,tc] - for i in xrange(0, batchSize): + for i in range(0, batchSize): #print '\texample '+str(i+1)+'/'+str(batchSize) - for j in xrange(0, inputChannels): + for j in range(0, inputChannels): #print '\t\tfeature map '+str(j+1)+'/'+str(inputChannels) - for r in xrange(0, videoHeight): + for r in range(0, videoHeight): #print '\t\t\trow '+str(r+1)+'/'+str(videoHeight) - for c in xrange(0, videoWidth): - for t in xrange(0, videoDur): + for c in range(0, videoWidth): + for t in range(0, videoDur): R[i, r, c, t, j] = b[j] ftc = max([0, int(N.ceil( @@ -426,5 +426,5 @@ return R -from Conv3D import conv3D -from ConvGrad3D import convGrad3D +from .Conv3D import conv3D +from .ConvGrad3D import convGrad3D --- ./theano/tensor/nnet/ConvGrad3D.py (original) +++ ./theano/tensor/nnet/ConvGrad3D.py (refactored) @@ -72,15 +72,15 @@ #print 'computing output of shape '+str(WShape) - for k in xrange(0, WShape[1]): - for l in xrange(0, WShape[2]): - for m in xrange(0, WShape[3]): - for i in xrange(0, batchSize): - for p in xrange(0, outputHeight): - for q in xrange(0, outputWidth): - for r in xrange(0, outputDur): - for j in xrange(0, WShape[0]): - for z in xrange(0, WShape[4]): + for k in range(0, WShape[1]): + for l in range(0, WShape[2]): + for m in range(0, WShape[3]): + for i in range(0, 
batchSize): + for p in range(0, outputHeight): + for q in range(0, outputWidth): + for r in range(0, outputDur): + for j in range(0, WShape[0]): + for z in range(0, WShape[4]): dCdW[j,k,l,m,z] += dCdH[i,p,q,r,j] * V[i,dr*p+k,dc*q+l,dt*r+m,z] output_storage[0][0] = dCdW @@ -276,5 +276,5 @@ convGrad3D = ConvGrad3D() -from Conv3D import conv3D -from ConvTransp3D import convTransp3D +from .Conv3D import conv3D +from .ConvTransp3D import convTransp3D --- ./theano/tensor/nnet/Conv3D.py (original) +++ ./theano/tensor/nnet/Conv3D.py (refactored) @@ -572,7 +572,7 @@ assert len(W.shape) == 5 assert len(V.shape) == 5 if len(b.shape) != 1: - print b.shape + print(b.shape) assert False assert len(d) == 3 @@ -604,19 +604,19 @@ outputWidth, outputDur, outputChannels ), dtype=V.dtype ) #H[i,j,x,y,t] = b_j + sum_k sum_l sum_m sum_z W[j,z,k,l,m] V[i,z, dx*x+k,dy*y+l,dt*t+m] - for i in xrange(0,H.shape[0]): + for i in range(0,H.shape[0]): #print '\texample '+str(i+1)+'/'+str(H.shape[0]) - for j in xrange(0,H.shape[4]): + for j in range(0,H.shape[4]): #print '\t\tfeature map '+str(j+1)+'/'+str(H.shape[1]) - for x in xrange(0,H.shape[1]): + for x in range(0,H.shape[1]): #print '\t\t\trow '+str(x+1)+'/'+str(H.shape[2]) - for y in xrange(0,H.shape[2]): - for t in xrange(0,H.shape[3]): + RefactoringTool: No changes to ./theano/tensor/io.py RefactoringTool: Refactored ./theano/tensor/inplace.py RefactoringTool: No changes to ./theano/tensor/fourier.py RefactoringTool: Refactored ./theano/tensor/extra_ops.py RefactoringTool: Refactored ./theano/tensor/elemwise_cgen.py RefactoringTool: Refactored ./theano/tensor/elemwise.py for y in range(0,H.shape[2]): + for t in range(0,H.shape[3]): H[i,x,y,t,j] = b[j] - for k in xrange(0,filterHeight): - for l in xrange(0,filterWidth): - for m in xrange(0,filterDur): - for z in xrange(0,inputChannels): + for k in range(0,filterHeight): + for l in range(0,filterWidth): + for m in range(0,filterDur): + for z in range(0,inputChannels): #if (i,j,x,y,t) == (0,0,0,0,0): # print (( W[j,z,k,l,m] , V[i,z,d[0]*x+k,d[1]*y+l,d[2]*t+m] ), (k,l,m) ) w = W[j,k,l,m,z] @@ -627,5 +627,5 @@ return H -import ConvGrad3D -import ConvTransp3D +from . import ConvGrad3D +from . import ConvTransp3D --- ./theano/tensor/inplace.py (original) +++ ./theano/tensor/inplace.py (refactored) @@ -1,5 +1,5 @@ from theano import scalar as scal -import elemwise +from . 
import elemwise from theano import printing from theano.printing import pprint @@ -292,7 +292,7 @@ def transpose_inplace(x, **kwargs): """Perform a transpose on a tensor without copying the underlying storage""" - dims = range(x.ndim-1, -1, -1) + dims = list(range(x.ndim-1, -1, -1)) return elemwise.DimShuffle(x.broadcastable, dims, inplace=True)(x) #pprint.assign(transpose_inplace, printing.MemberPrinter('T')) --- ./theano/tensor/extra_ops.py (original) +++ ./theano/tensor/extra_ops.py (refactored) @@ -285,7 +285,9 @@ return [[True], [False]] - def grad(self, (x, repeats), (gz, )): + def grad(self, xxx_todo_changeme, xxx_todo_changeme1): + (x, repeats) = xxx_todo_changeme + (gz, ) = xxx_todo_changeme1 if repeats.ndim == 0: if self.axis is None: axis = x.ndim --- ./theano/tensor/elemwise_cgen.py (original) +++ ./theano/tensor/elemwise_cgen.py (refactored) @@ -229,7 +229,7 @@ s = preloops.get(0, "") else: s = "" - for i, (pre_task, task), indices in reversed(zip(xrange(len(loop_tasks) - 1), loop_tasks, zip(*loop_orders))): + for i, (pre_task, task), indices in reversed(list(zip(list(range(len(loop_tasks) - 1)), loop_tasks, list(zip(*loop_orders))))): s = loop_over(preloops.get(i, "") + pre_task, s + task, indices, i) s += loop_tasks[-1] @@ -319,7 +319,7 @@ %(ovar)s_loops_it = %(ovar)s_loops.begin(); """ % locals() - for i in xrange(nnested): + for i in range(nnested): declare_totals += """ int TOTAL_%(i)i = init_totals[%(ovar)s_loops_it->second]; ++%(ovar)s_loops_it; @@ -361,13 +361,13 @@ std::vector< std::pair >::reverse_iterator %(ovar)s_loops_rit; """ % locals() - for i in xrange(nvars): + for i in range(nvars): var = sub["lv%i" % i] declare_strides_jumps += """ %(ovar)s_loops_rit = %(ovar)s_loops.rbegin();""" % locals() adjust = "0" - for j in reversed(range(nnested)): + for j in reversed(list(range(nnested))): jump = "(%s) - (%s)" % ("%(var)s_stride_l%(j)i" % locals(), adjust) declare_strides_jumps +=""" int %(var)s_stride_l%(j)i = init_strides[%(i)i][%(ovar)s_loops_rit->second]; @@ -382,11 +382,11 @@ declare_iter += "%(var)s_iter = (%(dtype)s*)(PyArray_DATA(%(var)s));\n" % locals() loop = inner_task - for i in reversed(range(nnested)): + for i in reversed(list(range(nnested))): iterv = 'ITER_%i' % i total = 'TOTAL_%i' % i update = '' - for j in xrange(nvars): + for j in range(nvars): var = sub["lv%i" % j] update += "%(var)s_iter += %(var)s_jump_l%(i)i;\n" % locals() --- ./theano/tensor/elemwise.py (original) +++ ./theano/tensor/elemwise.py (refactored) @@ -1,6 +1,6 @@ import sys from copy import copy -from itertools import izip + import numpy @@ -20,7 +20,7 @@ # We cannot import discrete_dtypes from tensor.basic yet, # so we redefine them here -discrete_dtypes = map(str, scalar.discrete_types) +discrete_dtypes = list(map(str, scalar.discrete_types)) # tensor depends on elemwise to provide definitions for several ops @@ -314,7 +314,7 @@ str(nd_out) + '-1] = PyArray_DESCR(%(basename)s)->elsize' ) - for i in xrange(nd_out - 2, -1, -1): + for i in range(nd_out - 2, -1, -1): strides_statements.append( "if (strides[%(i)s] == 0) strides[%(i)s] = strides[%(i)s+1] * dimensions[%(i)s+1]" % dict(i=str(i))) @@ -352,16 +352,16 @@ + close_bracket) if 0: - print 'C_CODE' - print '' - print self - print "IN BROAD", self.input_broadcastable - print "NEW ORDER", self.new_order - print "SHUFFLE", self.shuffle - print "AUGMENT", self.augment - print '------------' - print '' - print full_code + print('C_CODE') + print('') + print(self) + print("IN BROAD", self.input_broadcastable) + print("NEW ORDER", 
self.new_order) + print("SHUFFLE", self.shuffle) + print("AUGMENT", self.augment) + print('------------') + print('') + print(full_code) if 0: sys.exit() @@ -395,9 +395,9 @@ if new_order != () and new_order[0] == 'x': return "%s" % self.__p(new_order[1:], pstate, r) # return "[%s]" % self.__p(new_order[1:], pstate, r) - if list(new_order) == range(r.type.ndim): + if list(new_order) == list(range(r.type.ndim)): return pstate.pprinter.process(r) - if list(new_order) == list(reversed(range(r.type.ndim))): + if list(new_order) == list(reversed(list(range(r.type.ndim)))): return "%s.T" % pstate.pprinter.process(r) return "DimShuffle{%s}(%s)" % (", ".join(map(str, new_order)), pstate.pprinter.process(r)) @@ -474,7 +474,7 @@ self.name = name self.scalar_op = scalar_op self.inplace_pattern = inplace_pattern - self.destroy_map = dict((o, [i]) for o, i in inplace_pattern.items()) + self.destroy_map = dict((o, [i]) for o, i in list(inplace_pattern.items())) self.ufunc = None self.nfunc = None @@ -513,7 +513,7 @@ is left-completed to the greatest number of dimensions with 1s using DimShuffle. """ - inputs = map(as_tensor_variable, inputs) + inputs = list(map(as_tensor_variable, inputs)) shadow = self.scalar_op.make_node( *[Scalar(dtype=i.type.dtype)() for i in inputs]) @@ -529,7 +529,7 @@ # TODO: use LComplete instead args.append(DimShuffle( input.type.broadcastable, - ['x'] * difference + range(length), + ['x'] * difference + list(range(length)), inplace=False)(input)) inputs = args @@ -542,14 +542,14 @@ # it is multiplied by nout because Elemwise supports multiple outputs # (nout of them) out_broadcastables = [[all(bcast) - for bcast in izip(*[input.type.broadcastable + for bcast in zip(*[input.type.broadcastable for input in inputs])]] * shadow.nout #inplace_pattern maps output idx -> input idx inplace_pattern = self.inplace_pattern if inplace_pattern: - for overwriter, overwritten in inplace_pattern.items(): - for ob, ib in izip(out_broadcastables[overwriter], + for overwriter, overwritten in list(inplace_pattern.items()): + for ob, ib in zip(out_broadcastables[overwriter], inputs[overwritten].type.broadcastable): if ib and not ob: raise ValueError(( @@ -558,20 +558,20 @@ out_dtypes = [o.type.dtype for o in shadow.outputs] if any(inputs[i].type.dtype != out_dtypes[o] - for o, i in inplace_pattern.items()): + for o, i in list(inplace_pattern.items())): raise TypeError(( "Cannot do an inplace operation on incompatible data types.", ([i.type.dtype for i in inputs], out_dtypes, inplace_pattern))) outputs = [TensorType(dtype=dtype, broadcastable=broadcastable)() - for dtype, broadcastable in izip(out_dtypes, out_broadcastables) + for dtype, broadcastable in zip(out_dtypes, out_broadcastables) ] return Apply(self, inputs, outputs) def __eq__(self, other): if type(self) == type(other): - items = self.inplace_pattern.items() - other_items = other.inplace_pattern.items() + items = list(self.inplace_pattern.items()) + other_items = list(other.inplace_pattern.items()) items.sort() other_items.sort() rval = ((self.scalar_op == other.scalar_op) @@ -591,7 +591,7 @@ def __str__(self): if self.name is None: if self.inplace_pattern: - items = self.inplace_pattern.items() + items = list(self.inplace_pattern.items()) items.sort() return "Elemwise{%s}%s" % (self.scalar_op, str(items)) else: @@ -612,7 +612,7 @@ bgrads = self._bgrad(inputs, ograds) rop_out = None - for jdx, (inp, eval_point) in enumerate(izip(inputs, + for jdx, (inp, eval_point) in enumerate(zip(inputs, eval_points)): # if None, then we can just ignore 
this branch .. # what we do is to assume that for any non-differentiable @@ -664,7 +664,7 @@ # can tell this op did # the right thing. new_rval = [] - for elem, ipt in izip(rval, inputs): + for elem, ipt in zip(rval, inputs): if isinstance(elem.type, (NullType, DisconnectedType)): new_rval.append(elem) else: @@ -720,8 +720,8 @@ return t return Scalar(t.type.dtype)() - scalar_inputs = map(as_scalar, inputs) - scalar_ograds = map(as_scalar, ograds) + scalar_inputs = list(map(as_scalar, inputs)) + scalar_ograds = list(map(as_scalar, ograds)) scalar_igrads = self.scalar_op.grad(scalar_inputs, scalar_ograds) for igrad in scalar_igrads: assert igrad is not None, self.scalar_op @@ -755,7 +755,7 @@ *[transform(ipt) for ipt in node.inputs]) return new_r ret = [] - for scalar_igrad, ipt in izip(scalar_igrads, inputs): + for scalar_igrad, ipt in zip(scalar_igrads, inputs): if scalar_igrad is None: # undefined gradient ret.append(None) @@ -766,8 +766,8 @@ def perform(self, node, inputs, output_storage): maxsize = max(len(input.shape) for input in inputs) - for dims in izip(*[([(1, True)] * (maxsize - len(input.shape)) - + zip(input.shape, sinput.type.broadcastable)) + for dims in zip(*[([(1, True)] * (maxsize - len(input.shape)) + + list(zip(input.shape, sinput.type.broadcastable))) for input, sinput in zip(inputs, node.inputs)]): if max(d for d, b in dims) != 1 and (1, False) in dims: # yes there may be more compact ways to write this code, @@ -791,7 +791,7 @@ # Determine the shape of outputs out_shape = [] - for values in izip(*[input.shape for input in inputs]): + for values in zip(*[input.shape for input in inputs]): if any(v == 0 for v in values): # All non-broadcasted dimensions should be zero assert max(values) <= 1 @@ -801,7 +801,7 @@ out_shape = tuple(out_shape) if not self.inplace_pattern: - for output, storage in izip(node.outputs, output_storage): + for output, storage in zip(node.outputs, output_storage): odat = storage[0] if odat is not None: if odat.shape != out_shape: @@ -813,7 +813,7 @@ storage[0] = odat else: for i, (output, storage) in enumerate( - izip(node.outputs, output_storage)): + zip(node.outputs, output_storage)): #i is an output idx if i in self.inplace_pattern: odat = inputs[self.inplace_pattern[i]] @@ -852,7 +852,7 @@ if nout == 1: variables = [variables] - for variable, storage, nout in izip(variables, output_storage, + for variable, storage, nout in zip(variables, output_storage, node.outputs): if str(getattr(variable, "dtype", "")) == 'object': # Since numpy 1.6, function created with numpy.frompyfunc @@ -885,7 +885,7 @@ else: # there must be some input that is not broadcastable in # dimension 'dim' - for ishp, i in izip(i_shapes, node.inputs): + for ishp, i in zip(i_shapes, node.inputs): if isinstance(i.type, theano.scalar.Scalar): continue # we skip scalar if not i.type.broadcastable[dim]: @@ -915,7 +915,7 @@ # assert that inames and inputs order stay consistent. # This is to protect again futur change of uniq. assert len(inames) == len(inputs) - ii, iii = zip(*gof.utils.uniq(zip(_inames, node.inputs))) + ii, iii = list(zip(*gof.utils.uniq(list(zip(_inames, node.inputs))))) assert all([x == y for x,y in zip(ii, inames)]) assert all([x == y for x,y in zip(iii, inputs)]) @@ -926,15 +926,15 @@ # that overwrite them. We just convert them to the actual # Variables. 
dmap = dict([(node.outputs[o], [node.inputs[i]]) - for o, i in self.inplace_pattern.iteritems()]) + for o, i in self.inplace_pattern.items()]) # dtypes of the inputs idtypes = [input.type.dtype_specs()[1] for input in inputs] # These are the outputs that we will need to allocate # (output, name, name of the c type), transposed - real = zip(*[(r, s, r.type.dtype_specs()[1]) - for r, s in izip(node.outputs, onames) if r not in dmap]) + real = list(zip(*[(r, s, r.type.dtype_specs()[1]) + for r, s in zip(node.outputs, onames) if r not in dmap])) if real: real_outputs, real_onames, real_odtypes = real else: @@ -943,8 +943,8 @@ # Outputs that are aliased with an input (inplace) # (output, name), transposed (c type name not needed since we don't # need to allocate. - aliased = zip(*[(r, s) - for (r, s) in izip(node.outputs, onames) if r in dmap]) + aliased = list(zip(*[(r, s) + for (r, s) in zip(node.outputs, onames) if r in dmap])) if aliased: aliased_outputs, aliased_onames = aliased else: @@ -960,7 +960,7 @@ # dimensionality) nnested = len(orders[0]) sub = dict(sub) - for i, (input, iname) in enumerate(izip(inputs, inames)): + for i, (input, iname) in enumerate(zip(inputs, inames)): # the c generators will substitute the input names for # references to loop variables lv0, lv1, ... sub['lv%i' % i] = iname @@ -970,7 +970,7 @@ # Check if all inputs (except broadcasted scalar) are fortran. # In that case, create an fortran output ndarray. - z = zip(inames, inputs) + z = list(zip(inames, inputs)) alloc_fortran = ' && '.join(["PyArray_ISFORTRAN(%s)" % arr for arr, var in z if not all(var.broadcastable)]) @@ -983,16 +983,16 @@ # We loop over the "real" outputs, i.e., those that are not # inplace (must be allocated) and we declare/allocate/check # them - for output, oname, odtype in izip( + for output, oname, odtype in zip( real_outputs, real_onames, real_odtypes): i += 1 # before this loop, i = number of inputs sub['lv%i' % i] = oname sub['olv'] = oname - alloc += cgen.make_declare([range(nnested)], [odtype], + alloc += cgen.make_declare([list(range(nnested))], [odtype], dict(sub, lv0=oname)) alloc += cgen.make_alloc(orders, odtype, sub, fortran=alloc_fortran) - alloc += cgen.make_checks([range(nnested)], [odtype], + alloc += cgen.make_checks([list(range(nnested))], [odtype], dict(sub, lv0=oname)) olv_index = i # index of the last output @@ -1000,7 +1000,7 @@ # inplace (overwrite the contents of one of the inputs) and # make the output pointers point to theur corresponding input # pointers. 
- for output, oname in izip(aliased_outputs, aliased_onames): + for output, oname in zip(aliased_outputs, aliased_onames): olv_index = inputs.index(dmap[output][0]) iname = inames[olv_index] # We make the output point to the corresponding input and @@ -1026,7 +1026,7 @@ # not be declared, as they are #defined in defines task_decl = "".join([ "%s& %s_i = *%s_iter;\n" % (dtype, name, name) - for name, dtype in izip(inames + list(real_onames), + for name, dtype in zip(inames + list(real_onames), idtypes + list(real_odtypes))]) # We generate the C code of the inner loop using the scalar op @@ -1058,13 +1058,13 @@ all_code = [code] loop = cgen.make_loop( - loop_orders=orders + [range(nnested)] * len(real_onames), + loop_orders=orders + [list(range(nnested))] * len(real_onames), dtypes=(idtypes + list(real_odtypes)), loop_tasks=all_code, sub=sub) else: loop = cgen.make_reordered_loop( - init_loop_orders=orders + [range(nnested)] * len(real_onames), + init_loop_orders=orders + [list(range(nnested))] * len(real_onames), olv_index=olv_index, dtypes=(idtypes + list(real_odtypes)), inner_task=code, @@ -1118,7 +1118,7 @@ } """ % locals() if contig is not None: - z = zip(inames + onames, inputs + node.outputs) + z = list(zip(inames + onames, inputs + node.outputs)) cond1 = ' && '.join(["PyArray_ISCONTIGUOUS(%s)" % arr for arr, var in z if not all(var.broadcastable)]) @@ -1276,7 +1276,7 @@ input = as_tensor_variable(input) axis = self.axis if axis is None: - axis = range(len(input.type.broadcastable)) + axis = list(range(len(input.type.broadcastable))) if any([a < 0 for a in axis]): axis2 = [] for a in self.axis: @@ -1331,7 +1331,7 @@ output, = out axis = self.axis if axis is None: - axis = range(input.ndim) + axis = list(range(input.ndim)) variable = inputRefactoringTool: Refactored ./theano/tensor/deprecated/test_rmodule.py to_reduce = reversed(sorted(axis)) @@ -1413,7 +1413,7 @@ axis = self.axis if axis is None: - axis = range(len(input.type.broadcastable)) + axis = list(range(len(input.type.broadcastable))) if len(axis) == 0: # The acc_dtype is never a downcast compared to the input dtype @@ -1424,13 +1424,13 @@ assert var.dtype == node.outputs[0].dtype return var.owner.op._c_all(var.owner, name, inames, onames, sub) - order1 = [i for i in xrange(input.type.ndim) if i not in axis] + order1 = [i for i in range(input.type.ndim) if i not in axis] order = order1 + list(axis) nnested = len(order1) sub = dict(sub) - for i, (input, iname) in enumerate(izip(node.inputs, inames)): + for i, (input, iname) in enumerate(zip(node.inputs, inames)): sub['lv%i' % i] = iname decl = "" @@ -1453,11 +1453,11 @@ # Allocate output buffer alloc += cgen.make_declare( - [range(nnested) + ['x'] * len(axis)], + [list(range(nnested)) + ['x'] * len(axis)], [odtype], dict(sub, lv0=oname)) alloc += cgen.make_alloc([order1], odtype, sub) alloc += cgen.make_checks( - [range(nnested) + ['x'] * len(axis)], + [list(range(nnested)) + ['x'] * len(axis)], [odtype], dict(sub, lv0=oname)) if adtype != odtype: @@ -1466,11 +1466,11 @@ sub['olv'] = aname alloc += cgen.make_declare( - [range(nnested) + ['x'] * len(axis)], + [list(range(nnested)) + ['x'] * len(axis)], [adtype], dict(sub, lv0=aname)) alloc += cgen.make_alloc([order1], adtype, sub) alloc += cgen.make_checks( - [range(nnested) + ['x'] * len(axis)], + [list(range(nnested)) + ['x'] * len(axis)], [adtype], dict(sub, lv0=aname)) if hasattr(self.scalar_op, 'identity'): @@ -1495,7 +1495,7 @@ pattern = [0] * len(node.inputs[0].broadcastable) axis = self.axis if axis is None: - axis = 
range(len(pattern))
+ axis = list(range(len(pattern)))
for i in axis:
pattern[i] = 1
pattern_ = str(pattern)[1:-1]
@@ -1551,7 +1551,7 @@
else:
all_code = [task0_decl + code1]
loop = cgen.make_loop(
- [order, range(nnested) + ['x'] * len(axis)],
+ [order, list(range(nnested)) + ['x'] * len(axis)],
[idtype, adtype], all_code, sub)
end = ""
@@ -1846,7 +1846,7 @@
gz = as_tensor_variable(gz)
axis = self.axis
if axis is None:
- axis = range(x.type.ndim)
+ axis = list(range(x.type.ndim))
if axis == ():
return gz,
new_dims = []
@@ -1965,7 +1965,7 @@
gz = as_tensor_variable(gz)
axis = self.axis
if axis is None:
- axis = range(prod_in.type.ndim)
+ axis = list(range(prod_in.type.ndim))
if axis == ():
return gz,
new_dims = []
--- ./theano/tensor/deprecated/test_rmodule.py (original)
+++ ./theano/tensor/deprecated/test_rmodule.py (refactored)
@@ -14,19 +14,19 @@
class T_test_module(unittest.TestCase):
def test_state_propagation(self):
if 1:
- print >> sys.stderr, "RModule deprecated"
+ print("RModule deprecated", file=sys.stderr)
else:
x = tensor.vector()
rk = RandomKit('rk', 1000)
f = compile.function([x, (rk, [gof.Container(r = gof.generic, storage = [123], name='bla')])], rk.binomial(tensor.shape(x)))
- print "RK", rk.value
+ print("RK", rk.value)
f['rk'] = 9873456
- print "RK", rk.value
+ print("RK", rk.value)
RefactoringTool: Refactored ./theano/tensor/deprecated/rmodule.py
RefactoringTool: No changes to ./theano/tensor/blas_scipy.py
RefactoringTool: No changes to ./theano/tensor/blas_headers.py
RefactoringTool: No changes to ./theano/tensor/blas_c.py
RefactoringTool: Refactored ./theano/tensor/blas.py
- rvals = [f([1,2,3,4,6, 7, 8]) for i in xrange(5)]
- print rvals
- for i in xrange(5-1):
- for j in xrange(i+1, 5):
+ rvals = [f([1,2,3,4,6, 7, 8]) for i in range(5)]
+ print(rvals)
+ for i in range(5-1):
+ for j in range(i+1, 5):
assert not N.all(rvals[i] == rvals[j])
def test_B(self):
@@ -64,9 +64,9 @@
"0.054382248842 0.563459168529 0.192757276954 0.360455221883 0.174805216702",
"0.961942907777 0.49657319422 0.0316111492826 0.0915054717012 0.195877184515"]
- for i in xrange(5):
+ for i in range(5):
s = " ".join([str(n) for n in m.f(N.ones(5))])
- print s
+ print(s)
assert s == rvals[i]
if __name__ == '__main__':
--- ./theano/tensor/deprecated/rmodule.py (original)
+++ ./theano/tensor/deprecated/rmodule.py (refactored)
@@ -58,7 +58,7 @@
def distribute(self, value, indices, containers):
rg = partial(numpy.random.RandomState(int(value)).randint, 2**30)
- elems = deque(zip(indices, containers))
+ elems = deque(list(zip(indices, containers)))
i = 0
while elems:
index, container = elems.popleft()
@@ -119,7 +119,7 @@
# situation it will reseed all the rngs using the containers
# associated to them.
c._rkit.kit.distribute(seedgen.random_integers(2**30),
- xrange(len(inst2._rkit)), inst2._rkit)
+ range(len(inst2._rkit)), inst2._rkit)
else:
- self._rkit.kit.distribute(seedgen.random_integers(2**30), xrange(len(inst._rkit)), inst._rkit)
+ self._rkit.kit.distribute(seedgen.random_integers(2**30), range(len(inst._rkit)), inst._rkit)
--- ./theano/tensor/blas.py (original)
+++ ./theano/tensor/blas.py (refactored)
@@ -133,6 +133,7 @@
import numpy
import numpy.distutils
import numpy.distutils.system_info
+from functools import reduce
try:
import numpy.distutils.__config__
except ImportError:
@@ -305,7 +306,7 @@
numpy.dtype('complex64'): fblas.cgemv,
numpy.dtype('complex128'): fblas.zgemv,
}
-except ImportError, e:
+except ImportError as e:
have_fblas = False
# This is used in Gemv and ScipyGer.
We use CGemv and CGer # when theano.config.blas.ldflags is defined. So we don't need a @@ -913,7 +914,7 @@ return dict(inplace=self.inplace) def make_node(self, *inputs): - inputs = map(T.as_tensor_variable, inputs) + inputs = list(map(T.as_tensor_variable, inputs)) if len(inputs) != 5: raise TypeError( "Wrong number of inputs for %s (expected 5, got %s)" % @@ -923,7 +924,7 @@ # For the consistency check we don't want z to be a cached constant. if getattr(z, 'cached', False): z = copy.copy(z) - zr, xr, yr = [set(view_roots(i)) for i in z, x, y] + zr, xr, yr = [set(view_roots(i)) for i in (z, x, y)] # We want the gemm to be inplace. When this op is inplace, it # declare to be inplace only on z. So to make it safe, we @@ -1399,10 +1400,10 @@ lst = lst2 # Try every pair in the sM_list, trying to turn it into a gemm operation - for i in xrange(len(lst) - 1): + for i in range(len(lst) - 1): s_i, M_i = lst[i] - for j in xrange(i + 1, len(lst)): + for j in range(i + 1, len(lst)): s_j, M_j = lst[j] if M_i.type != M_j.type: @@ -1515,7 +1516,7 @@ time_canonicalize += time1 time_factor_can += time2 time_factor_list += time3 - except InconsistencyError, e: + except InconsistencyError as e: nb_inconsistency_make += 1 RefactoringTool: Refactored ./theano/tensor/basic.py continue if new_outputs: @@ -1523,7 +1524,7 @@ assert len(new_outputs) == len(node.outputs) try: fgraph.replace_all_validate_remove( - zip(node.outputs, new_outputs), + list(zip(node.outputs, new_outputs)), [old_dot22], reason='GemmOptimizer', #For now we disable the warning as we know case @@ -1532,11 +1533,11 @@ ) did_something = True nb_replacement += 1 - except InconsistencyError, e: + except InconsistencyError as e: # TODO: retry other applications of gemm (see comment # in _gemm_from_node) nb_inconsistency_replace += 1 - except ReplacementDidntRemovedError, e: + except ReplacementDidntRemovedError as e: nb_replacement_didn_t_remove += 1 self.warned = True nb_iter += 1 @@ -1549,16 +1550,16 @@ def print_profile(stream, prof, level=0): blanc = (' ' * level) #1946.912556s - ('gemm_optimizer', 'GemmOptimizer', 1) - print >> stream, blanc, "GemmOptimizer" - print >> stream, blanc, " nb_iter", prof[1] - print >> stream, blanc, " nb_replacement", prof[2] - print >> stream, blanc, " nb_replacement_didn_t_remove", prof[3] - print >> stream, blanc, " nb_inconsistency_make", prof[4] - print >> stream, blanc, " nb_inconsistency_replace", prof[5] - print >> stream, blanc, " time_canonicalize", prof[6] - print >> stream, blanc, " time_factor_can", prof[7] - print >> stream, blanc, " time_factor_list", prof[8] - print >> stream, blanc, " time_toposort", prof[9] + print(blanc, "GemmOptimizer", file=stream) + print(blanc, " nb_iter", prof[1], file=stream) + print(blanc, " nb_replacement", prof[2], file=stream) + print(blanc, " nb_replacement_didn_t_remove", prof[3], file=stream) + print(blanc, " nb_inconsistency_make", prof[4], file=stream) + print(blanc, " nb_inconsistency_replace", prof[5], file=stream) + print(blanc, " time_canonicalize", prof[6], file=stream) + print(blanc, " time_factor_can", prof[7], file=stream) + print(blanc, " time_factor_list", prof[8], file=stream) + print(blanc, " time_toposort", prof[9], file=stream) class Dot22(GemmRelated): @@ -1582,7 +1583,7 @@ z, = out try: z[0] = numpy.asarray(numpy.dot(x, y)) - except ValueError, e: + except ValueError as e: # The error raised by numpy has no shape information, we mean to # add that e.args = e.args + (x.shape, y.shape) @@ -1855,7 +1856,7 @@ z, = out try: z[0] = 
numpy.asarray(scalar * numpy.dot(x, y)) - except ValueError, e: + except ValueError as e: # The error raised by numpy has no shape information, we # mean to add that e.args = e.args + (x.shape, y.shape) --- ./theano/tensor/basic.py (original) +++ ./theano/tensor/basic.py (refactored) @@ -4,7 +4,7 @@ import sys import warnings -from itertools import izip + import numpy from copy import copy as python_copy @@ -43,13 +43,13 @@ python_all = all # Define common subsets of dtypes (as strings). -complex_dtypes = map(str, scal.complex_types) -continuous_dtypes = map(str, scal.continuous_types) -float_dtypes = map(str, scal.float_types) -discrete_dtypes = map(str, scal.discrete_types) -all_dtypes = map(str, scal.all_types) -int_dtypes = map(str, scal.int_types) -uint_dtypes = map(str, scal.uint_types) +complex_dtypes = list(map(str, scal.complex_types)) +continuous_dtypes = list(map(str, scal.continuous_types)) +float_dtypes = list(map(str, scal.float_types)) +discrete_dtypes = list(map(str, scal.discrete_types)) +all_dtypes = list(map(str, scal.all_types)) +int_dtypes = list(map(str, scal.int_types)) +uint_dtypes = list(map(str, scal.uint_types)) class ShapeError(Exception): @@ -68,7 +68,7 @@ elif (isinstance(x, numpy.random.RandomState) and isinstance(y, numpy.random.RandomState)): return python_all(numpy.all(a == b) for a, b in - izip(x.__getstate__(), y.__getstate__())) + zip(x.__getstate__(), y.__getstate__())) else: return x == y @@ -343,7 +343,7 @@ x_ = autocast_int(x) elif rtype is TensorConstant and isinstance(x, float): x_ = autocast_float(x) - elif rtype is TensorConstant and isinstance(x, long): + elif rtype is TensorConstant and isinstance(x, int): # We need to address the case where a long number is used in a # Theano graph, because on Windows 64, all shapes are expressed # with longs. @@ -624,7 +624,7 @@ v.owner.inputs[0].owner.inputs) and len(v.owner.op.idx_list) == 1 and #idx_list can contain Scalar Type object. - isinstance(v.owner.op.idx_list[0], (int, long, + isinstance(v.owner.op.idx_list[0], (int, numpy.integer))): # Python 2.4 does not support indexing with numpy.integer @@ -680,7 +680,7 @@ if names == 1: return f() else: - return [f() for i in xrange(names[0])] + return [f() for i in range(names[0])] if isinstance(names, tuple): if len(names) == 1: names = names[0] @@ -1264,7 +1264,7 @@ def infer_shape(self, node, shapes): xshape, sshape = shapes new_shape = [] - for dim in xrange(node.inputs[0].ndim): + for dim in range(node.inputs[0].ndim): try: s = get_scalar_constant_value(node.inputs[1][dim]) s = as_tensor_variable(s) @@ -1368,9 +1368,9 @@ if axis[idx] < 0: axis[idx] += x.type.ndim axis.sort() - if axis == range(-x.type.ndim, 0, 1): - axis = range(x.type.ndim) - assert axis == range(x.type.ndim), ( + if axis == list(range(-x.type.ndim, 0, 1)): + axis = list(range(x.type.ndim)) + assert axis == list(range(x.type.ndim)), ( "MaxAndArgmax does not support multiple" " axes. 
the max fct supports it.") elif isinstance(axis, Variable): @@ -1388,7 +1388,7 @@ raise ValueError('axis out of range') axis[id] = x.type.ndim + a if axis is None: - axis = _as_tensor_variable(range(x.type.ndim)) + axis = _as_tensor_variable(list(range(x.type.ndim))) else: axis = _as_tensor_variable(axis) @@ -1413,7 +1413,7 @@ def perform(self, node, inp, outs): x, axis = inp max, max_idx = outs - if python_all(axis == range(x.ndim)): + if python_all(axis == list(range(x.ndim))): axis = None max[0] = theano._asarray(numpy.max(x, axis), dtype=node.outputs[0].dtype) @@ -1422,7 +1422,7 @@ def infer_shape(self, node, shapes): ishape, axis_shape = shapes axis = node.inputs[1] - if python_all(axis.data == range(node.inputs[0].ndim)): + if python_all(axis.data == list(range(node.inputs[0].ndim))): return [(), ()] rval = tuple([ishape[i] for (i, b) in enumerate( node.inputs[0].type.broadcastable) if i != axis.data]) @@ -1485,7 +1485,7 @@ # Raise the g_max and xmax to the same number of dim as the input. pattern = [] out_dim = 0 - if python_all(axis.data == range(x.ndim)): + if python_all(axis.data == list(range(x.ndim))): # We are taking the max/argmax over all dimensions. axis = None for i in range(x.ndim): @@ -1518,7 +1518,7 @@ y = as_tensor_variable(y) if axis is None: - axis = range(x.type.ndim) + axis = list(range(x.type.ndim)) elif isinstance(axis, (int, numpy.integer)): axis = [axis] elif isinstance(axis, numpy.ndarray) and axis.ndim == 0: @@ -2194,7 +2194,7 @@ return matrix_result else: if a.ndim > 0: - tuple_result = tuple([matrix_result[i] for i in xrange(a.ndim)]) + tuple_result = tuple([matrix_result[i] for i in range(a.ndim)]) else: tuple_result = tuple([matrix_result[0]]) return tuple_result @@ -2287,7 +2287,7 @@ return [out_shape] def grad(self, inp, grads): - return [grad_undefined(self, i, inp[i]) for i in xrange(3)] + return [grad_undefined(self, i, inp[i]) for i in range(3)] def __eq__(self, other): return type(self) == type(other) and self.dtype == other.dtype @@ -2396,7 +2396,7 @@ return [out_shape] def grad(self, inp, grads): - return [grad_undefined(self, i, inp[i]) for i in xrange(3)] + return [grad_undefined(self, i, inp[i]) for i in range(3)] def __eq__(self, other): return type(self) == type(other) and self.dtype == other.dtype @@ -2567,7 +2567,7 @@ gz = grads[0] n_axes_to_sum = gz.ndim - x.ndim #The number of dimensions added - axis = range(n_axes_to_sum) + axis = list(range(n_axes_to_sum)) #The broadcasted dimensions axis_broadcasted = [] for i, (ib, gb) in enumerate( @@ -2631,11 +2631,11 @@ # If the output is a constant, it will have to be deepcopied # each time the function is called. So we do not fold. 
return False - elif (not isinstance(client[0], basestring) + elif (not isinstance(client[0], str) and isinstance(client[0].op, ( theano.tensor.subtensor.IncSubtensor, theano.tensor.subtensor.AdvancedIncSubtensor1, - theano.tensor.subtensor.AdvancedIncSubtensor, + theano.tensor.subtensor.AdvancedIncSubtensor ))): return False return True @@ -2811,7 +2811,7 @@ shp = cast(shp, 'float64') if axis is None: - axis = range(input.ndim) + axis = list(range(input.ndim)) elif isinstance(axis, (int, numpy.integer)): axis = [axis] elif isinstance(axis, numpy.ndarray) and axis.ndim == 0: @@ -2847,7 +2847,7 @@ input_ndim = input.type.ndim if axis is None: - axis = range(input_ndim) + axis = list(range(input_ndim)) elif isinstance(axis, (int, numpy.integer)): axis = [axis] elif isinstance(axis, numpy.ndarray) and axis.ndim == 0: @@ -3080,9 +3080,9 @@ """ if axes is None: - axes = range((x.ndim - 1), -1, -1) + axes = list(range((x.ndim - 1), -1, -1)) ret = DimShuffle(x.broadcastable, axes, inplace=False)(x) - if x.name and axes == range((x.ndim - 1), -1, -1): + if x.name and axes == list(range((x.ndim - 1), -1, -1)): ret.name = x.name + '.T' return ret @@ -3172,7 +3172,7 @@ # x = unbroadcast(x, *range(x.type.ndim)) inputs = [x, axis, splits] - outputs = [x.type() for i in xrange(self.len_splits)] + outputs = [x.type() for i in range(self.len_splits)] return Apply(self, inputs, outputs) @@ -3205,7 +3205,7 @@ general_key = [slice(None, None, None) for s in x.shape] lower_idx = 0 - for i in xrange(self.len_splits): + for i in range(self.len_splits): upper_idx = lower_idx + splits[i] general_key[axis] = slice(lower_idx, upper_idx, None) outputs[i][0] = x.__getitem__(general_key).copy() @@ -3251,7 +3251,7 @@ def __init__(self, *axis): self.axis = dict(axis) - for axis, broad in self.axis.iteritems(): + for axis, broad in self.axis.items(): assert isinstance(axis, (numpy.integer, int)), ( "Rebroadcast need integers axis. Got ", axis) @@ -3259,7 +3259,7 @@ return type(self) == type(other) and self.axis == other.axis def __hash__(self): - items = self.axis.items() + items = list(self.axis.items()) items.sort() # no ambiguity because each item key is unique return hash(type(self)) ^ hash(tuple(items)) @@ -3268,14 +3268,14 @@ broadcast_pattern = [] else: broadcast_pattern = ['?' 
for i - in xrange(1 + numpy.max(self.axis.keys()))] - for k, v in self.axis.iteritems(): + in range(1 + numpy.max(list(self.axis.keys())))] + for k, v in self.axis.items(): broadcast_pattern[k] = str(int(v)) return '%s{%s}' % (self.__class__.__name__, ','.join(broadcast_pattern)) def make_node(self, x): - if self.axis.keys() and (x.ndim <= numpy.max(self.axis.keys())): + if list(self.axis.keys()) and (x.ndim <= numpy.max(list(self.axis.keys()))): raise ValueError('Trying to rebroadcast nonexistant dimension') t = x.type.__class__(dtype=x.type.dtype, broadcastable=[self.axis.get(i, b) @@ -3286,7 +3286,7 @@ def perform(self, node, inp, out_): x, = inp out, = out_ - for axis, value in self.axis.iteritems(): + for axis, value in self.axis.items(): if value and x.shape[axis] != 1: raise ValueError('Dimension %s in Rebroadcast\'s input was' ' supposed to be 1 (got %s instead)' % @@ -3298,13 +3298,13 @@ gz, = grads # restore the broadcasting pattern of the input return Rebroadcast(*[(axis, x.type.broadcastable[axis]) - for axis, value in self.axis.iteritems()])(gz), + for axis, value in self.axis.items()])(gz), def infer_shape(self, node, ishapes): assert len(ishapes) == 1 l = [] one = constant(1) - for ax in xrange(len(ishapes[0])): + for ax in range(len(ishapes[0])): if self.axis.get(ax, False): l.append(one) else: @@ -3348,7 +3348,7 @@ optimization. """ rval = Rebroadcast(*[(i, broadcastable[i]) - for i in xrange(len(broadcastable))])(x) + for i in range(len(broadcastable))])(x) return theano.tensor.opt.apply_rebroadcast_opt(rval) @@ -3462,7 +3462,7 @@ else: # These unbroadcasts are for the gradient... not sure exactly # why... - as_tensor_variable_args = [unbroadcast(x, *range(x.type.ndim)) + as_tensor_variable_args = [unbroadcast(x, *list(range(x.type.ndim))) for x in as_tensor_variable_args] # When the axis may vary, no dimension can be guaranteed to be # broadcastable. @@ -3530,7 +3530,7 @@ assert len(shape) == n_dim out_shapes = [] - for dim in xrange(n_dim): + for dim in range(n_dim): # we have to deal with 2 possible cases in here : # a) we are dealing with the dimension for which we join # (called t_side from true side of the if, where the if @@ -3632,7 +3632,7 @@ """ _t = as_tensor_variable(t) - pattern = ['x'] * n_ones + [i for i in xrange(_t.type.ndim)] + pattern = ['x'] * n_ones + [i for i in range(_t.type.ndim)] return DimShuffle(_t.broadcastable, pattern)(_t) @@ -3644,7 +3644,7 @@ """ _t = as_tensor_variable(t) - pattern = [i for i in xrange(_t.type.ndim)] + ['x'] * n_ones + pattern = [i for i in range(_t.type.ndim)] + ['x'] * n_ones return DimShuffle(_t.broadcastable, pattern)(_t) @@ -3669,14 +3669,13 @@ # See ticket #660 if numpy.all([ # in case there is direct int in tensors. - isinstance(t, (numpy.number, float, int, python_complex, - long)) or + isinstance(t, (numpy.number, float, int, python_complex)) or (isinstance(t, Variable) and isinstance(t.type, TensorType) and t.ndim == 0) for t in tensors]): # in case there is direct int - tensors = map(as_tensor_variable, tensors) + tensors = list(map(as_tensor_variable, tensors)) dtype = scal.upcast(*[i.dtype for i in tensors]) return theano.tensor.opt.MakeVector(dtype)(*tensors) return join(0, *[shape_padleft(t, 1) for t in tensors]) @@ -3799,7 +3798,7 @@ shp_list = shp_orig if hasattr(shp_orig, "ndim") and shp_orig.ndim == 0: shp_list = [shp_orig] - for index in xrange(self.ndim): + for index in range(self.ndim): y = shp_list[index] y = as_tensor_variable(y) # Try to see if we can infer that y has a constant value of 1. 
@@ -3886,7 +3885,7 @@ return [requ] else: oshape = [] - for i in xrange(self.ndim): + for i in range(self.ndim): default_os_i = theano.tensor.opt.Shape_i(i)(node.outputs[0]) try: os_i = get_scalar_constant_value(node.inputs[1][i]).item() @@ -4173,7 +4172,7 @@ return self.__class__.__name__ def make_node(self, start, stop, step): - start, stop, step = map(as_tensor_variable, (start, stop, step)) + start, stop, step = list(map(as_tensor_variable, (start, stop, step))) assert start.ndim == 0 assert stop.ndim == 0 assert step.ndim == 0 @@ -4237,7 +4236,7 @@ if stop is None: start, stop = 0, start - start, stop, step = map(as_tensor_variable, (start, stop, step)) + start, stop, step = list(map(as_tensor_variable, (start, stop, step))) # If dtype is not provided, infer it from the other arguments if dtype is None: dtype = scal.upcast(start.type.dtype, stop.type.dtype, step.type.dtype) @@ -4324,7 +4323,7 @@ # Compute the broadcastable pattern of the output out_broadcastable = [xb and yb for xb, yb in - izip(x.type.broadcastable, y.type.broadcastable)] + zip(x.type.broadcastable, y.type.broadcastable)] out_type = tensor(dtype=x.type.dtype, broadcastable=out_broadcastable) inputlist = [x, y, inverse] @@ -4366,17 +4365,17 @@ xs0 = x.shape[0] ys0 = y.shape[0] if xs0 == ys0: - for i in xrange(xs0): + for i in range(xs0): self._rec_perform(node, x[i], y[i], inverse, out[i], curdim + 1) elif ys0 == 1 and node.inputs[1].type.broadcastable[curdim]: # Broadcast y - for i in xrange(xs0): + for i in range(xs0): self._rec_perform(node, x[i], y[0], inverse, out[i], curdim + 1) elif xs0 == 1 and node.inputs[0].type.broadcastable[curdim]: # Broadcast x - for i in xrange(ys0): + for i in range(ys0): self._rec_perform(node, x[0], y[i], inverse, out[i], curdim + 1) else: @@ -4391,7 +4390,7 @@ # Make sure the output is big enough out_s = [] - for xdim, ydim in izip(x_s, y_s): + for xdim, ydim in zip(x_s, y_s): if xdim == ydim: RefactoringTool: No changes to ./theano/tensor/__init__.py RefactoringTool: Refactored ./theano/sparse/type.py outdim = xdim elif xdim == 1: @@ -4427,7 +4426,7 @@ # If x has been broadcasted along some axes, we need to sum # the gradient over these axes, but keep the dimension (as # broadcastable) - broadcasted_dims = [dim for dim in xrange(gz.type.ndim) + broadcasted_dims = [dim for dim in range(gz.type.ndim) if x.type.broadcastable[dim] and not gz.type.broadcastable[dim]] gx = Sum(axis=broadcasted_dims)(gx) @@ -4436,7 +4435,7 @@ # so we need to put them back. newdims = [] i = 0 - for dim in xrange(gz.type.ndim): + for dim in range(gz.type.ndim): if dim in broadcasted_dims: newdims.append('x') else: @@ -4514,7 +4513,7 @@ # graph. See Dot22 in tensor.blas for details. def make_node(self, *inputs): - inputs = map(as_tensor_variable, inputs) + inputs = list(map(as_tensor_variable, inputs)) if len(inputs) != 2: raise TypeError( @@ -4639,7 +4638,7 @@ input_values = [iv0, iv1] eval_point_values = [ev0, ev1] - for i in xrange(2): + for i in range(2): if eval_point_values[i] is not None and \ input_values[i].shape != eval_point_values[i].shape: raise ValueError('input ' + str(i) + ' and eval_point ' + @@ -4912,10 +4911,10 @@ 'equal to b.ndim (b.ndim=%i, max(axes[1])=%i).' 
% (b.ndim, numpy.max(numpy.array(b_axes)))) - a_order = (tuple(x for x in tuple(xrange(a.ndim)) if x not in a_axes) + a_order = (tuple(x for x in tuple(range(a.ndim)) if x not in a_axes) + a_axes) b_order = (b_axes - + tuple(x for x in tuple(xrange(b.ndim)) if x not in b_axes)) + + tuple(x for x in tuple(range(b.ndim)) if x not in b_axes)) a_shuffled = a.dimshuffle(a_order) b_shuffled = b.dimshuffle(b_order) @@ -4983,10 +4982,14 @@ return Apply(self, [x], [tensor(dtype=x.dtype, broadcastable=[False] * (x.ndim - 1))]) - def perform(self, node, (x,), (z,)): + def perform(self, node, xxx_todo_changeme, xxx_todo_changeme1): + (x,) = xxx_todo_changeme + (z,) = xxx_todo_changeme1 z[0] = x.diagonal(self.offset, self.axis1, self.axis2) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme2, xxx_todo_changeme3): + (x,) = xxx_todo_changeme2 + (gz,) = xxx_todo_changeme3 return [grad_not_implemented(self, 0, x)] def infer_shape(self, node, shapes): @@ -5032,10 +5035,12 @@ return Apply(self, [diag], [matrix(dtype=diag.dtype)]) - def perform(self, node, inputs, (z,)): + def perform(self, node, inputs, xxx_todo_changeme4): + (z,) = xxx_todo_changeme4 z[0] = numpy.diag(inputs[0]) - def grad(self, inputs, (gz,)): + def grad(self, inputs, xxx_todo_changeme5): + (gz,) = xxx_todo_changeme5 return [diagonal(gz)] def infer_shape(self, nodes, shapes): @@ -5082,6 +5087,6 @@ (2, 2, 4, 4) """ if isinstance(arg, (tuple, list)): - return stack(*map(stacklists, arg)) + return stack(*list(map(stacklists, arg))) else: return arg --- ./theano/sparse/type.py (original) +++ ./theano/sparse/type.py (refactored) @@ -60,12 +60,12 @@ raise NotImplementedError('unsupported dtype "%s" not in list' % dtype, list(self.dtype_set)) - assert isinstance(format, basestring) + assert isinstance(format, str) if format in self.format_cls: self.format = format else: raise NotImplementedError('unsupported format "%s" not in list' % - format, self.format_cls.keys()) + format, list(sRefactoringTool: No changes to ./theano/sparse/tests/test_utils.py RefactoringTool: No changes to ./theano/sparse/tests/test_sp2.py RefactoringTool: No changes to ./theano/sparse/tests/test_opt.py RefactoringTool: Refactored ./theano/sparse/tests/test_basic.py RefactoringTool: No changes to ./theano/sparse/sharedvar.py RefactoringTool: Refactored ./theano/sparse/sandbox/test_sp.py elf.format_cls.keys())) def filter(self, value, strict=False, allow_downcast=None): if isinstance(value, self.format_cls[self.format])\ --- ./theano/sparse/tests/test_basic.py (original) +++ ./theano/sparse/tests/test_basic.py (refactored) @@ -148,7 +148,7 @@ if unsorted_indices: for idx in range(n): d = data[idx] - d = d[range(d.shape[0])] + d = d[list(range(d.shape[0]))] assert not d.has_sorted_indices data[idx] = d if explicit_zero: @@ -184,11 +184,15 @@ x = as_sparse_variable(x) return gof.Apply(self, [x], [x.type()]) - def perform(self, node, (x, ), (out, )): + def perform(self, node, xxx_todo_changeme, xxx_todo_changeme1): + (x, ) = xxx_todo_changeme + (out, ) = xxx_todo_changeme1 assert _is_sparse(x) out[0] = -x - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme2, xxx_todo_changeme3): + (x,) = xxx_todo_changeme2 + (gz,) = xxx_todo_changeme3 assert _is_sparse_variable(x) and _is_sparse_variable(gz) if self.structured: return sp_ones_like(x) * dense_from_sparse(gz), @@ -1052,7 +1056,7 @@ mat = numpy.asarray(numpy.random.randn(N, K), dense_dtype) theano_times = [] scipy_times = [] - for i in xrange(5): + for i in range(5): t0 = time.time() theano_result = 
f(spmat, mat) t1 = time.time() @@ -1777,7 +1781,7 @@ def test_op(self): for format in sparse.sparse_formats: - for shape in zip(range(5, 9), range(3, 7)[::-1]): + for shape in zip(list(range(5, 9)), list(range(3, 7))[::-1]): variable, data = sparse_random_inputs(format, shape=shape) f = theano.function(variable, self.op(*variable)) @@ -1788,7 +1792,7 @@ def test_infer_shape(self): for format in sparse.sparse_formats: - for shape in zip(range(5, 9), range(3, 7)[::-1]): + for shape in zip(list(range(5, 9)), list(range(3, 7))[::-1]): variable, data = sparse_random_inputs(format, shape=shape) self._compile_and_check(variable, [self.op(*variable)], @@ -1797,7 +1801,7 @@ def test_grad(self): for format in sparse.sparse_formats: - for shape in zip(range(5, 9), range(3, 7)[::-1]): + for shape in zip(list(range(5, 9)), list(range(3, 7))[::-1]): variable, data = sparse_random_inputs(format, shape=shape) verify_grad_sparse( self.op, @@ -1812,7 +1816,7 @@ def test_op(self): for format in sparse.sparse_formats: - for shape in zip(range(5, 9), range(3, 7)[::-1]): + for shape in zip(list(range(5, 9)), list(range(3, 7))[::-1]): variable, data = sparse_random_inputs(format, shape=shape) data[0][0, 0] = data[0][1, 1] = 0 @@ -1831,7 +1835,7 @@ def test_grad(self): for format in sparse.sparse_formats: - for shape in zip(range(5, 9), range(3, 7)[::-1]): + for shape in zip(list(range(5, 9)), list(range(3, 7))[::-1]): variable, data = sparse_random_inputs(format, shape=shape) verify_grad_sparse( self.op, --- ./theano/sparse/sandbox/test_sp.py (original) +++ ./theano/sparse/sandbox/test_sp.py (refactored) @@ -178,11 +178,11 @@ ntime1 = time.time() refout = numpy.zeros((bsize,nkern,outshp[1],outshp[2])) patch = numpy.zeros((kshp[0],kshp[1])) - for b in xrange(bsize): - for k in xrange(nkern): + for b in range(bsize): + for k in range(nkern): pixi = 0 # pixel index in raster order - RefactoringTool: Refactored ./theano/sparse/sandbox/sp2.py RefactoringTool: Refactored ./theano/sparse/sandbox/sp.py for j in xrange(outshp[1]): - for i in xrange(outshp[2]): + for j in range(outshp[1]): + for i in range(outshp[2]): n = j * ss[0] m = i * ss[1] patch = zeropad_img[b,n:n+kshp[0],m:m+kshp[1]] --- ./theano/sparse/sandbox/sp2.py (original) +++ ./theano/sparse/sandbox/sp2.py (refactored) @@ -70,7 +70,9 @@ x = as_sparse_variable(x) return gof.Apply(self, [x], [x.type()]) - def perform(self, node, (x, ), (out, )): + def perform(self, node, xxx_todo_changeme, xxx_todo_changeme1): + (x, ) = xxx_todo_changeme + (out, ) = xxx_todo_changeme1 assert _is_sparse(x) out[0] = x.copy() out[0].data = numpy.asarray(numpy.random.poisson(out[0].data), @@ -124,12 +126,16 @@ return gof.Apply(self, [n, p, shape], [SparseType(dtype=self.dtype, format=self.format).make_variable()]) - def perform(self, node, (n, p, shape, ), (out, )): + def perform(self, node, xxx_todo_changeme2, xxx_todo_changeme3): + (n, p, shape, ) = xxx_todo_changeme2 + (out, ) = xxx_todo_changeme3 binomial = numpy.random.binomial(n, p, size=shape) csx_matrix = getattr(scipy.sparse, self.format + '_matrix') out[0] = csx_matrix(binomial, dtype=self.dtype) - def grad(self, (n, p, shape, ), (gz,)): + def grad(self, xxx_todo_changeme4, xxx_todo_changeme5): + (n, p, shape, ) = xxx_todo_changeme4 + (gz,) = xxx_todo_changeme5 return None, None, None def infer_shape(self, node, ins_shapes): @@ -178,7 +184,9 @@ return gof.Apply(self, [n, p], [p.type()]) - def perform(self, node, (n, p), (out, )): + def perform(self, node, xxx_todo_changeme6, xxx_todo_changeme7): + (n, p) = 
xxx_todo_changeme6 + (out, ) = xxx_todo_changeme7 assert _is_sparse(p) if p.format != 'csr': @@ -187,14 +195,14 @@ out[0] = p.copy() if n.ndim == 0: - for i in xrange(p.shape[0]): + for i in range(p.shape[0]): k, l = p.indptr[i], p.indptr[i + 1] out[0].data[k:l] = numpy.random.multinomial(n, p.data[k:l]) elif n.ndim == 1: if n.shape[0] != p.shape[0]: raise ValueError('The number of element of n must be ' 'the same as the number of row of p.') - for i in xrange(p.shape[0]): + for i in range(p.shape[0]): k, l = p.indptr[i], p.indptr[i + 1] out[0].data[k:l] = numpy.random.multinomial(n[i], p.data[k:l]) --- ./theano/sparse/sandbox/sp.py (original) +++ ./theano/sparse/sandbox/sp.py (refactored) @@ -43,18 +43,20 @@ """ @staticmethod - def sparse_eval(inshp, kshp, nkern, (dx, dy)=(1, 1), mode='valid'): + def sparse_eval(inshp, kshp, nkern, xxx_todo_changeme=(1, 1), mode='valid'): + (dx, dy) = xxx_todo_changeme return convolution_indices.evaluate(inshp, kshp, (dx, dy), nkern, mode=mode, ws=False) @staticmethod - def conv_eval(inshp, kshp, (dx, dy)=(1, 1), mode='valid'): + def conv_eval(inshp, kshp, xxx_todo_changeme1=(1, 1), mode='valid'): + (dx, dy) = xxx_todo_changeme1 return convolution_indices.evaluate(inshp, kshp, (dx, dy), mode=mode, ws=True) # img_shape and ker_shape are (height,width) @staticmethod - def evaluate(inshp, kshp, (dx, dy)=(1, 1), nkern=1, mode='valid', ws=True): + def evaluate(inshp, kshp, xxx_todo_changeme2=(1, 1), nkern=1, mode='valid', ws=True): """Build a sparse matrix which can be used for performing... * convolution: in this case, the dot product of this matrix with the input images will generate a stack of images @@ -80,6 +82,6 @@ RefactoringTool: Refactored ./theano/sparse/opt.py :returns: the structure of a sparse matrix, and the logical dimensions of the image which will be the result of filtering. """ + (dx, dy) = xxx_todo_changeme2 N = numpy # inshp contains either 2 entries (height,width) or 3 (nfeatures,h,w) @@ -145,9 +148,9 @@ # values (order in which you write the values determines how # the vectorized data will get used later one) - for fmapi in xrange(inshp[0]): # loop over input features + for fmapi in range(inshp[0]): # loop over input features # loop over number of kernels (nkern=1 for weight sharing) - for n in xrange(nkern): + for n in range(nkern): # FOR EACH OUTPUT PIXEL... # loop over output image height @@ -230,8 +233,8 @@ kmap = N.zeros(ntaps, dtype='int') k = 0 #print 'TEMPORARY BUGFIX: REMOVE !!!' - for j in xrange(spmat.shape[1]): - for i_idx in xrange(spmat.indptr[j], spmat.indptr[j + 1]): + for j in range(spmat.shape[1]): + for i_idx in range(spmat.indptr[j], spmat.indptr[j + 1]): if spmat.data[i_idx] != 0: # this is == spmat[i,j] - 1 kmap[k] = spmat.data[i_idx] - 1 @@ -252,8 +255,9 @@ return rval - def perform(self, node, (inshp, kshp),\ - (out_indices, out_indptr, spmat_shape)): + def perform(self, node, xxx_todo_changeme3, xxx_todo_changeme4): + (inshp, kshp) = xxx_todo_changeme3 + (out_indices, out_indptr, spmat_shape) = xxx_todo_changeme4 indices, indptr, spmatshp, outshp = self.evaluate(inshp, kshp) out_indices[0] = indices out_indptr[0] = indptr --- ./theano/sparse/opt.py (original) +++ ./theano/sparse/opt.py (refactored) @@ -1,4 +1,4 @@ -from itertools import izip + import numpy import scipy @@ -24,7 +24,7 @@ # csm.owner.inputs could be broadcastable. In that case, we have # to adjust the broadcasting flag here.
ret_var = [theano.tensor.patternbroadcast(i, o.broadcastable) - for i, o in izip(csm.owner.inputs, node.outputs)] + for i, o in zip(csm.owner.inputs, node.outputs)] return ret_var return False @@ -105,7 +105,9 @@ [tensor.tensor(dtype_out, (False, b.type.broadcastable[1]))]) return r - def perform(self, node, (a_val, a_ind, a_ptr, a_nrows, b), (out,)): + def perform(self, node, xxx_todo_changeme, xxx_todo_changeme1): + (a_val, a_ind, a_ptr, a_nrows, b) = xxx_todo_changeme + (out,) = xxx_todo_changeme1 a = scipy.sparse.csc_matrix((a_val, a_ind, a_ptr), (a_nrows, b.shape[0]), copy=False) @@ -113,7 +115,7 @@ out[0] = theano._asarray(a * b, dtype=node.outputs[0].type.dtype) assert _is_dense(out[0]) # scipy 0.7 automatically converts to dense - def c_code(self, node, name, (a_val, a_ind, a_ptr, a_nrows, b), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme2, xxx_todo_changeme3, sub): # C-implementation of the dot product of the sparse matrix A and matrix # B. # @param a_val: non-zero values of the sparse matrix @@ -127,6 +129,8 @@ # @param sub: TODO, not too sure, something to do with weave probably + (a_val, a_ind, a_ptr, a_nrows, b) = xxx_todo_changeme2 + (z,) = xxx_todo_changeme3 if node.inputs[0].type.dtype in ('complex64', 'complex128'): raise NotImplementedError('Complex types are not supported for a_val') if node.inputs[4].type.dtype in ('complex64', 'complex128'): @@ -296,7 +300,9 @@ b.type.broadcastable[1]))]) return r - def perform(self, node, (a_val, a_ind, a_ptr, b), (out,)): + def perform(self, node, xxx_todo_changeme4, xxx_todo_changeme5): RefactoringTool: Refactored ./theano/sparse/basic.py + (a_val, a_ind, a_ptr, b) = xxx_todo_changeme4 + (out,) = xxx_todo_changeme5 a = scipy.sparse.csr_matrix((a_val, a_ind, a_ptr), (len(a_ptr) - 1, b.shape[0]), copy=True) # use view_map before setting this to False @@ -305,7 +311,7 @@ # scipy 0.7 automatically converts to dense, but not .6 sometimes assert _is_dense(out[0]) - def c_code(self, node, name, (a_val, a_ind, a_ptr, b), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme6, xxx_todo_changeme7, sub): """ C-implementation of the dot product of the sparse matrix A and matrix B.
@@ -319,7 +325,8 @@ @param z: return value @param sub: TODO, not too sure, something to do with weave probably """ - # retrieve dtype number + (a_val, a_ind, a_ptr, b) = xxx_todo_changeme6 + (z,) = xxx_todo_changeme7 typenum_z = tensor.TensorType(self.dtype_out, []).dtype_specs()[2] if node.inputs[0].type.dtype in ('complex64', 'complex128'): raise NotImplementedError('Complex types are not supported for a_val') @@ -760,9 +767,11 @@ return gof.Apply(self, [a_val, a_ind, a_ptr, a_dim, b_val, b_ind, b_ptr, b_dim], [b_val.type()]) - def c_code(self, node, name, (a_val, a_ind, a_ptr, a_dim, - b_val, b_ind, b_ptr, b_dim), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme8, xxx_todo_changeme9, sub): # retrieve dtype number + (a_val, a_ind, a_ptr, a_dim, + b_val, b_ind, b_ptr, b_dim) = xxx_todo_changeme8 + (z,) = xxx_todo_changeme9 typenum_z = node.outputs[0].type.dtype_specs()[2] if node.inputs[0].type.dtype in ('complex64', 'complex128'): raise NotImplementedError('Complex types are not supported for a_val') @@ -917,9 +926,10 @@ #def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)): # return NotImplementedError() - def c_code(self, node, name, (_data, _indices, _indptr, _b,), - (_zout, ), sub): - + def c_code(self, node, name, xxx_todo_changeme10, xxx_todo_changeme11, sub): + + (_data, _indices, _indptr, _b,) = xxx_todo_changeme10 + (_zout, ) = xxx_todo_changeme11 if node.inputs[0].type.dtype in ('complex64', 'complex128'): raise NotImplementedError('Complex types are not supported for a') if node.inputs[3].type.dtype in ('complex64', 'complex128'): @@ -1033,9 +1043,10 @@ #def perform(self, node, (a_data, a_indices, a_indptr, b), (out,)): # return NotImplemented() - def c_code(self, node, name, (_data, _indices, _indptr, _b,), - (_zout, ), sub): - + def c_code(self, node, name, xxx_todo_changeme12, xxx_todo_changeme13, sub): + + (_data, _indices, _indptr, _b,) = xxx_todo_changeme12 + (_zout, ) = xxx_todo_changeme13 if node.inputs[0].type.dtype in ('complex64', 'complex128'): raise NotImplementedError('Complex types are not supported for a') if node.inputs[3].type.dtype in ('complex64', 'complex128'): --- ./theano/sparse/basic.py (original) +++ ./theano/sparse/basic.py (refactored) @@ -485,7 +485,8 @@ return gof.Apply(self, [csm], [data, tensor.ivector(), tensor.ivector(), tensor.ivector()]) - def perform(self, node, (csm,), out): + def perform(self, node, xxx_todo_changeme, out): + (csm,) = xxx_todo_changeme if self.kmap is None: out[0][0] = csm.data else: @@ -498,7 +499,7 @@ out[2][0] = theano._asarray(csm.indptr, dtype='int32') out[3][0] = theano._asarray(csm.shape, dtype='int32') - def grad(self, (csm,), g): + def grad(self, xxx_todo_changeme1, g): # g[1:] is all integers, so their Jacobian in this op # is 0. We thus don't need to worry about what their values @@ -508,6 +509,7 @@ # any gradient anywhere. 
but we know that at least one of # g[1:] is connected, or this grad method wouldn't have been # called, so we should report zeros + (csm,) = xxx_todo_changeme1 if isinstance(g[0].type, DisconnectedType): return [csm.zeros_like()] @@ -660,8 +662,10 @@ [SparseType(dtype=data.type.dtype, format=self.format).make_variable()]) - def perform(self, node, (data, indices, indptr, shape), (out,)): + def perform(self, node, xxx_todo_changeme2, xxx_todo_changeme3): # for efficiency, if remap does nothing, then do not apply it + (data, indices, indptr, shape) = xxx_todo_changeme2 + (out,) = xxx_todo_changeme3 if self.kmap is not None: data = data[self.kmap] @@ -688,7 +692,9 @@ def connection_pattern(self, node): return [[True], [False], [False], [False]] - def grad(self, (x_data, x_indices, x_indptr, x_shape), (g_out,)): + def grad(self, xxx_todo_changeme4, xxx_todo_changeme5): + (x_data, x_indices, x_indptr, x_shape) = xxx_todo_changeme4 + (g_out,) = xxx_todo_changeme5 g_data, g_indices, g_indptr, g_shape = csm_properties(g_out) # unpack the data vector and wrap it as a 1d TensorType g_data = csm_grad(self.kmap)(x_data, x_indices, x_indptr, x_shape, @@ -746,8 +752,10 @@ return gof.Apply(self, [x_data, x_indices, x_indptr, x_shape, g_data, g_indices, g_indptr, g_shape], [gout_data]) - def perform(self, node, (x_data, x_indices, x_indptr, x_shape, - g_data, g_indices, g_indptr, g_shape), (g_out,)): + def perform(self, node, xxx_todo_changeme6, xxx_todo_changeme7): + (x_data, x_indices, x_indptr, x_shape, + g_data, g_indices, g_indptr, g_shape) = xxx_todo_changeme6 + (g_out,) = xxx_todo_changeme7 if len(x_indptr) - 1 == x_shape[0]: sp_dim = x_shape[1] else: @@ -807,7 +815,9 @@ self, [x], [SparseType(dtype=self.out_type, format=x.format).make_variable()]) - def perform(self, node, (x, ), (out, )): + def perform(self, node, xxx_todo_changeme8, xxx_todo_changeme9): + (x, ) = xxx_todo_changeme8 + (out, ) = xxx_todo_changeme9 assert _is_sparse(x) out[0] = x.astype(self.out_type) @@ -879,17 +889,21 @@ broadcastable=(False, False) ).make_variable()]) - def perform(self, node, (x, ), (out, )): + def perform(self, node, xxx_todo_changeme10, xxx_todo_changeme11): + (x, ) = xxx_todo_changeme10 + (out, ) = xxx_todo_changeme11 if _is_dense(x): - print >> sys.stderr, ( + print(( "WARNING: You just called DenseFromSparse on a dense matrix." 
- ) + ), file=sys.stderr) out[0] = x else: out[0] = x.toarray() assert _is_dense(out[0]) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme12, xxx_todo_changeme13): + (x, ) = xxx_todo_changeme12 + (gz, ) = xxx_todo_changeme13 if self.sparse_grad: left = sp_ones_like(x) right = gz @@ -963,10 +977,14 @@ format=self.format ).make_variable()]) - def perform(self, node, (x, ), (out, )): + def perform(self, node, xxx_todo_changeme14, xxx_todo_changeme15): + (x, ) = xxx_todo_changeme14 + (out, ) = xxx_todo_changeme15 out[0] = SparseType.format_cls[self.format](x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme16, xxx_todo_changeme17): + (x, ) = xxx_todo_changeme16 + (gz, ) = xxx_todo_changeme17 gx = dense_from_sparse(gz) gx = tensor.patternbroadcast(gx, x.broadcastable) return gx, @@ -1079,7 +1097,9 @@ return gof.Apply(self, input_op, [x.type()]) - def perform(self, node, (x, start1, stop1, start2, stop2), (out, )): + def perform(self, node, xxx_todo_changeme18, xxx_todo_changeme19): + (x, start1, stop1, start2, stop2) = xxx_todo_changeme18 + (out, ) = xxx_todo_changeme19 assert _is_sparse(x) out[0] = x[start1:stop1, start2:stop2] @@ -1137,7 +1157,9 @@ return gof.Apply(self, input_op, [tensor.scalar(dtype=x.dtype)]) - def perform(self, node, (x, ind1, ind2), (out, )): + def perform(self, node, xxx_todo_changeme20, xxx_todo_changeme21): + (x, ind1, ind2) = xxx_todo_changeme20 + (out, ) = xxx_todo_changeme21 assert _is_sparse(x) out[0] = theano._asarray(x[ind1, ind2], x.dtype) @@ -1183,11 +1205,15 @@ format=self.format_map[x.type.format] ).make_variable()]) - def perform(self, node, (x, ), (out, )): + def perform(self, node, xxx_todo_changeme22, xxx_todo_changeme23): + (x, ) = xxx_todo_changeme22 + (out, ) = xxx_todo_changeme23 assert _is_sparse(x) out[0] = x.transpose() - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme24, xxx_todo_changeme25): + (x,) = xxx_todo_changeme24 + (gz,) = xxx_todo_changeme25 assert _is_sparse_variable(x) and _is_sparse_variable(gz) return transpose(gz), @@ -1219,11 +1245,15 @@ x = as_sparse_variable(x) return gof.Apply(self, [x], [x.type()]) - def perform(self, node, (x, ), (out, )): + def perform(self, node, xxx_todo_changeme26, xxx_todo_changeme27): + (x, ) = xxx_todo_changeme26 + (out, ) = xxx_todo_changeme27 assert _is_sparse(x) out[0] = -x - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme28, xxx_todo_changeme29): + (x,) = xxx_todo_changeme28 + (gz,) = xxx_todo_changeme29 assert _is_sparse_variable(x) and _is_sparse_variable(gz) return -gz, @@ -1257,19 +1287,23 @@ raise ValueError('x was not a csc matrix') return gof.Apply(self, [x, s], [x.type()]) - def perform(self, node, (x, s), (z,)): + def perform(self, node, xxx_todo_changeme30, xxx_todo_changeme31): + (x, s) = xxx_todo_changeme30 + (z,) = xxx_todo_changeme31 M, N = x.shape assert x.format == 'csc' assert s.shape == (N, ) y = x.copy() - for j in xrange(0, N): + for j in range(0, N): y.data[y.indptr[j]: y.indptr[j + 1]] *= s[j] z[0] = y - def grad(self, (x, s), (gz,)): + def grad(self, xxx_todo_changeme32, xxx_todo_changeme33): + (x, s) = xxx_todo_changeme32 + (gz,) = xxx_todo_changeme33 return [col_scale(gz, s), sp_sum(x * gz, axis=0)] def infer_shape(self, node, ins_shapes): @@ -1304,7 +1338,9 @@ def make_node(self, x, s): return gof.Apply(self, [x, s], [x.type()]) - def perform(self, node, (x, s), (z,)): + def perform(self, node, xxx_todo_changeme34, xxx_todo_changeme35): + (x, s) = xxx_todo_changeme34 + (z,) = xxx_todo_changeme35 M, N = 
x.shape assert x.format == 'csc' assert s.shape == (M, ) @@ -1314,13 +1350,15 @@ y_data = x.data.copy() - for j in xrange(0, N): - for i_idx in xrange(indptr[j], indptr[j + 1]): + for j in range(0, N): + for i_idx in range(indptr[j], indptr[j + 1]): y_data[i_idx] *= s[indices[i_idx]] z[0] = scipy.sparse.csc_matrix((y_data, indices, indptr), (M, N)) - def grad(self, (x, s), (gz,)): + def grad(self, xxx_todo_changeme36, xxx_todo_changeme37): + (x, s) = xxx_todo_changeme36 + (gz,) = xxx_todo_changeme37 return [row_scale(gz, s), sp_sum(x * gz, axis=1)] def infer_shape(self, node, ins_shapes): @@ -1423,13 +1461,17 @@ z = tensor.TensorType(broadcastable=b, dtype=x.dtype)() return gof.Apply(self, [x], [z]) - def perform(self, node, (x,), (z,)): + def perform(self, node, xxx_todo_changeme38, xxx_todo_changeme39): + (x,) = xxx_todo_changeme38 + (z,) = xxx_todo_changeme39 if self.axis == None: z[0] = numpy.asarray(x.sum()) else: z[0] = numpy.asarray(x.sum(self.axis)).ravel() - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme40, xxx_todo_changeme41): + (x,) = xxx_todo_changeme40 + (gz,) = xxx_todo_changeme41 if x.dtype not in continuous_dtypes: return [None] @@ -1500,13 +1542,17 @@ return gof.Apply(self, [x], [tensor.tensor(broadcastable=(False,), dtype=x.dtype)]) - def perform(self, node, (x,), (z,)): + def perform(self, node, xxx_todo_changeme42, xxx_todo_changeme43): + (x,) = xxx_todo_changeme42 + (z,) = xxx_todo_changeme43 N, M = x.shape if N != M: raise ValueError('Diag only apply on square matrix') z[0] = x.diagonal() - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme44, xxx_todo_changeme45): + (x,) = xxx_todo_changeme44 + (gz,) = xxx_todo_changeme45 return [square_diagonal(gz)] def infer_shape(self, nodes, shapes): @@ -1542,18 +1588,20 @@ return gof.Apply(self, [diag], [SparseType(dtype=diag.dtype, format='csc')()]) - def perform(self, node, inputs, (z,)): + def perform(self, node, inputs, xxx_todo_changeme46): + (z,) = xxx_todo_changeme46 diag, o_shape = inputs[0], inputs[0].shape * 2 N = len(diag) data = diag[:N] - indices = range(N) - indptr = range(N + 1) + indices = list(range(N)) + indptr = list(range(N + 1)) tup = (data, indices, indptr) z[0] = scipy.sparse.csc_matrix(tup, copy=True) - def grad(self, inputs, (gz,)): + def grad(self, inputs, xxx_todo_changeme47): + (gz,) = xxx_todo_changeme47 return [diag(gz)] def infer_shape(self, nodes, shapes): @@ -1593,7 +1641,9 @@ def make_node(self, x): return gof.Apply(self, [x], [x.type()]) - def perform(self, node, (x, ), (z, )): + def perform(self, node, xxx_todo_changeme48, xxx_todo_changeme49): + (x, ) = xxx_todo_changeme48 + (z, ) = xxx_todo_changeme49 if self.inplace: z[0] = x.sort_indices() else: @@ -1653,7 +1703,7 @@ return self.__class__.__name__ def make_node(self, x, y): - x, y = map(as_sparse_variable, [x, y]) + x, y = list(map(as_sparse_variable, [x, y])) if x.type.dtype != y.type.dtype: raise NotImplementedError() if x.type.format != y.type.format: @@ -1664,12 +1714,16 @@ format=x.type.format ).make_variable()]) - def perform(self, node, (x, y), (out, )): + def perform(self, node, xxx_todo_changeme50, xxx_todo_changeme51): + (x, y) = xxx_todo_changeme50 + (out, ) = xxx_todo_changeme51 assert _is_sparse(x) and _is_sparse(y) assert x.shape == y.shape out[0] = x + y - def grad(self, (x, y), (gz,)): + def grad(self, xxx_todo_changeme52, xxx_todo_changeme53): + (x, y) = xxx_todo_changeme52 + (gz,) = xxx_todo_changeme53 assert _is_sparse_variable(x) and _is_sparse_variable(y) assert _is_sparse_variable(gz) 
return gz, gz @@ -1701,7 +1755,7 @@ return hash(type(self)) def make_node(self, x, y): - x, y = map(as_sparse_variable, [x, y]) + x, y = list(map(as_sparse_variable, [x, y])) if x.type.dtype != y.type.dtype: raise NotImplementedError() if x.type.format != y.type.format: @@ -1711,14 +1765,17 @@ [SparseType(dtype=x.type.dtype, format=x.type.format).make_variable()]) - def perform(self, node, (x, y), (out, )): + def perform(self, node, xxx_todo_changeme54, xxx_todo_changeme55): + (x, y) = xxx_todo_changeme54 + (out, ) = xxx_todo_changeme55 assert _is_sparse(x) and _is_sparse(y) assert x.shape == y.shape assert x.data.shape == y.data.shape out[0] = x.copy() out[0].data += y.data - def grad(self, inputs, (gz, )): + def grad(self, inputs, xxx_todo_changeme56): + (gz, ) = xxx_todo_changeme56 is_continuous = [(i.dtype in continuous_dtypes) for i in inputs] derivative = {True: gz, False: None} @@ -1782,7 +1839,9 @@ broadcastable=y.type.broadcastable ).make_variable()]) - def c_code(self, node, name, (_data, _indices, _indptr, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme57, xxx_todo_changeme58, sub): + (_data, _indices, _indptr, y) = xxx_todo_changeme57 + (z, ) = xxx_todo_changeme58 inplace = int(self.inplace) format = {'csc': 0, 'csr': 1}[self.format] code = """ @@ -1825,7 +1884,9 @@ """ % dict(locals(), **sub) return code - def perform(self, node, (data, indices, indptr, y), (out, )): + def perform(self, node, xxx_todo_changeme59, xxx_todo_changeme60): + (data, indices, indptr, y) = xxx_todo_changeme59 + (out, ) = xxx_todo_changeme60 assert _is_dense(y) if self.format == 'csr': @@ -1837,7 +1898,9 @@ # numpy.matrixlib.defmatrix.matrix object and not an ndarray. out[0] = theano._asarray(x + y, dtype=node.outputs[0].type.dtype) - def grad(self, (x, y), (gz,)): + def grad(self, xxx_todo_changeme61, xxx_todo_changeme62): + (x, y) = xxx_todo_changeme61 + (gz,) = xxx_todo_changeme62 assert _is_sparse_variable(x) and _is_dense_variable(y) assert _is_dense_variable(gz) return sp_ones_like(x) * gz, gz @@ -1884,12 +1947,16 @@ [SparseType(dtype=x.type.dtype, format=x.type.format).make_variable()]) - def perform(self, node, (x, y), (out, )): + def perform(self, node, xxx_todo_changeme63, xxx_todo_changeme64): + (x, y) = xxx_todo_changeme63 + (out, ) = xxx_todo_changeme64 assert _is_sparse(x) and not _is_sparse(y) assert x.shape[1] == y.shape[0] out[0] = x.__class__(x + (x.toarray() != 0) * y) - def grad(self, (x, y), (gz,)): + def grad(self, xxx_todo_changeme65, xxx_todo_changeme66): + (x, y) = xxx_todo_changeme65 + (gz,) = xxx_todo_changeme66 assert _is_sparse_variable(x) and not _is_sparse_variable(y) assert _is_sparse_variable(gz) return gz, sp_sum(gz, axis=0, sparse_grad=True) @@ -1989,7 +2056,9 @@ "Got %s and %s." % (str(x.type), str(y.type))) return gof.Apply(self, [x, y], [x.type()]) - def perform(self, node, (x, y), (out, )): + def perform(self, node, xxx_todo_changeme67, xxx_todo_changeme68): + (x, y) = xxx_todo_changeme67 + (out, ) = xxx_todo_changeme68 assert _is_sparse(x) and _is_sparse(y) assert len(x.shape) == 2 assert y.shape == x.shape @@ -1997,7 +2066,9 @@ # x * y calls dot... 
out[0] = x.multiply(y) - def grad(self, (x, y), (gz,)): + def grad(self, xxx_todo_changeme69, xxx_todo_changeme70): + (x, y) = xxx_todo_changeme69 + (gz,) = xxx_todo_changeme70 return y * gz, x * gz def infer_shape(self, node, shapes): @@ -2044,7 +2115,9 @@ assert y.type.ndim <= 2 return gof.Apply(self, [x, y], [x.type()]) - def perform(self, node, (x, y), (out, )): + def perform(self, node, xxx_todo_changeme71, xxx_todo_changeme72): + (x, y) = xxx_todo_changeme71 + (out, ) = xxx_todo_changeme72 assert _is_sparse(x) and _is_dense(y) if len(y.shape) == 0: out[0] = x.copy() @@ -2065,8 +2138,8 @@ z = x.copy() z_data = z.data - for j in xrange(0, N): - for i_idx in xrange(indptr[j], indptr[j + 1]): + for j in range(0, N): + for i_idx in range(indptr[j], indptr[j + 1]): i = indices[i_idx] z_data[i_idx] *= y[i, j] out[0] = z @@ -2077,18 +2150,20 @@ z = x.copy() z_data = z.data - for i in xrange(0, M): - for j_idx in xrange(indptr[i], indptr[i + 1]): + for i in range(0, M): + for j_idx in range(indptr[i], indptr[i + 1]): j = indices[j_idx] z_data[j_idx] *= y[i, j] out[0] = z else: - print >> sys.stderr, ( + print(( "WARNING: crappy implementation of MulSD" - ), x.format + ), x.format, file=sys.stderr) out[0] = type(x)(x.toarray() * y) - def grad(self, (x, y), (gz,)): + def grad(self, xxx_todo_changeme73, xxx_todo_changeme74): + (x, y) = xxx_todo_changeme73 + (gz,) = xxx_todo_changeme74 assert _is_sparse_variable(x) and _is_dense_variable(y) assert _is_sparse_variable(gz) return y * gz, x * gz @@ -2131,12 +2206,16 @@ [SparseType(dtype=x.type.dtype, format=x.type.format).make_variable()]) - def perform(self, node, (x, y), (out, )): + def perform(self, node, xxx_todo_changeme75, xxx_todo_changeme76): + (x, y) = xxx_todo_changeme75 + (out, ) = xxx_todo_changeme76 assert _is_sparse(x) and not _is_sparse(y) assert x.shape[1] == y.shape[0] out[0] = x.__class__(x.toarray() * y) - def grad(self, (x, y), (gz,)): + def grad(self, xxx_todo_changeme77, xxx_todo_changeme78): + (x, y) = xxx_todo_changeme77 + (gz,) = xxx_todo_changeme78 assert _is_sparse_variable(x) and _is_dense_variable(y) assert _is_sparse_variable(gz) @@ -2241,7 +2320,8 @@ self, var, [SparseType(dtype=self.dtype, format=self.format).make_variable()]) - def perform(self, node, block, (out, )): + def perform(self, node, block, xxx_todo_changeme79): + (out, ) = xxx_todo_changeme79 for b in block: assert _is_sparse(b) out[0] = scipy.sparse.hstack(block, format=self.format, @@ -2251,7 +2331,8 @@ if out[0].dtype != self.dtype: out[0] = out[0].astype(self.dtype) - def grad(self, inputs, (gz, )): + def grad(self, inputs, xxx_todo_changeme80): + (gz, ) = xxx_todo_changeme80 is_continuous = [(inputs[i].dtype in tensor.continuous_dtypes) for i in range(len(inputs))] @@ -2320,7 +2401,8 @@ :note: The grad implemented is regular, i.e. not structured. 
""" - def perform(self, node, block, (out, )): + def perform(self, node, block, xxx_todo_changeme81): + (out, ) = xxx_todo_changeme81 for b in block: assert _is_sparse(b) out[0] = scipy.sparse.vstack(block, format=self.format, @@ -2330,7 +2412,8 @@ if out[0].dtype != self.dtype: out[0] = out[0].astype(self.dtype) - def grad(self, inputs, (gz, )): + def grad(self, inputs, xxx_todo_changeme82): + (gz, ) = xxx_todo_changeme82 is_continuous = [(inputs[i].dtype in tensor.continuous_dtypes) for i in range(len(inputs))] @@ -2413,7 +2496,9 @@ def make_node(self, x): return gof.Apply(self, [x], [x.type()]) - def perform(self, node, (x,), (z,)): + def perform(self, node, xxx_todo_changeme83, xxx_todo_changeme84): + (x,) = xxx_todo_changeme83 + (z,) = xxx_todo_changeme84 if self.inplace: c = x else: @@ -2421,7 +2506,9 @@ c.eliminate_zeros() z[0] = c - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme85, xxx_todo_changeme86): + (x,) = xxx_todo_changeme85 + (gz,) = xxx_todo_changeme86 return [gz] def infer_shape(self, node, i0_shapes): @@ -2744,7 +2831,9 @@ shape, copy=False) out[0] = rval - def grad(self, (x, y), (gz, )): + def grad(self, xxx_todo_changeme87, xxx_todo_changeme88): + (x, y) = xxx_todo_changeme87 + (gz, ) = xxx_todo_changeme88 assert _is_sparse_variable(gz) assert _is_sparse_variable(x) @@ -2838,7 +2927,9 @@ [tensor.tensor(dtype_out, (False, b.type.broadcastable[1]))]) - def perform(self, node, (a, b), (out,)): + def perform(self, node, xxx_todo_changeme89, xxx_todo_changeme90): + (a, b) = xxx_todo_changeme89 + (out,) = xxx_todo_changeme90 if a.shape[1] != b.shape[0]: raise ValueError('shape mismatch in StructuredDot.perform', (a.shape, b.shape)) @@ -2880,10 +2971,12 @@ # theano._asarray function documentation. out[0] = theano._asarray(variable, str(variable.dtype)) - def grad(self, (a, b), (g_out,)): + def grad(self, xxx_todo_changeme91, xxx_todo_changeme92): # a is sparse, b is dense, g_out is dense # ga = g_out x b.T # gb = a.T x g_out + (a, b) = xxx_todo_changeme91 + (g_out,) = xxx_todo_changeme92 return [structured_dot_grad(a, b, g_out), structured_dot(a.T, g_out)] def infer_shape(self, node, shapes): @@ -2958,12 +3051,14 @@ return gof.Apply(self, [a_indices, a_indptr, b, g_ab], [tensor.tensor(g_ab.dtype, (False,))]) - def perform(self, node, (a_indices, a_indptr, b, g_ab), (out,)): + def perform(self, node, xxx_todo_changeme93, xxx_todo_changeme94): + (a_indices, a_indptr, b, g_ab) = xxx_todo_changeme93 + (out,) = xxx_todo_changeme94 g_a_data = numpy.zeros(a_indices.shape, dtype=g_ab.dtype) - for j in xrange(len(a_indptr) - 1): + for j in range(len(a_indptr) - 1): ind0 = a_indptr[j] ind1 = a_indptr[j + 1] - for i_idx in xrange(ind0, ind1): + for i_idx in range(ind0, ind1): i = a_indices[i_idx] # Depending on the type of g_ab and b (sparse or dense), # the following dot product can result in a scalar or @@ -2977,8 +3072,10 @@ def c_code_cache_version(self): return (1,) - def c_code(self, node, name, (_indices, _indptr, _d, _g), (_zout, ), sub): - + def c_code(self, node, name, xxx_todo_changeme95, xxx_todo_changeme96, sub): + + (_indices, _indptr, _d, _g) = xxx_todo_changeme95 + (_zout, ) = xxx_todo_changeme96 if node.inputs[2].type.dtype in ('complex64', 'complex128'): raise NotImplementedError('Complex types are not supported for b') if node.inputs[3].type.dtype in ('complex64', 'complex128'): @@ -3092,13 +3189,15 @@ return gof.Apply(self, [a_indices, a_indptr, b, g_ab], [tensor.tensor(b.dtype, (False,))]) - def perform(self, node, (a_indices, a_indptr, b, g_ab), 
(out,)): + def perform(self, node, xxx_todo_changeme97, xxx_todo_changeme98): + (a_indices, a_indptr, b, g_ab) = xxx_todo_changeme97 + (out,) = xxx_todo_changeme98 g_a_data = numpy.zeros(a_indices.shape, dtype=g_ab.dtype) - for i in xrange(len(a_indptr) - 1): # loop over rows + for i in range(len(a_indptr) - 1): # loop over rows ind0 = a_indptr[i] ind1 = a_indptr[i + 1] # loop over values in that row (columns) - for j_idx in xrange(ind0, ind1): + for j_idx in range(ind0, ind1): j = a_indices[j_idx] # grad is dot product of i-th row of gradient with j-th row of b # Depending on the type of g_ab and b (sparse or dense), @@ -3113,8 +3212,10 @@ def c_code_cache_version(self): RefactoringTool: No changes to ./theano/sparse/__init__.py RefactoringTool: No changes to ./theano/scan_module/tests/test_scan_opt.py RefactoringTool: Refactored ./theano/scan_module/tests/test_scan.py return (1,) - def c_code(self, node, name, (_indices, _indptr, _d, _g), (_zout, ), sub): - + def c_code(self, node, name, xxx_todo_changeme99, xxx_todo_changeme100, sub): + + (_indices, _indptr, _d, _g) = xxx_todo_changeme99 + (_zout, ) = xxx_todo_changeme100 if node.inputs[2].type.dtype in ('complex64', 'complex128'): raise NotImplementedError('Complex types are not supported for b') if node.inputs[3].type.dtype in ('complex64', 'complex128'): @@ -3267,7 +3368,9 @@ return gof.Apply(self, [x, y, p], [p.type()]) - def perform(self, node, (x, y, p), (out,)): + def perform(self, node, xxx_todo_changeme101, xxx_todo_changeme102): + (x, y, p) = xxx_todo_changeme101 + (out,) = xxx_todo_changeme102 if _is_sparse(x): raise TypeError(x) @@ -3279,7 +3382,9 @@ out[0] = p.__class__(p.multiply(numpy.dot(x, y.T))) - def grad(self, (x, y, p), (gz,)): + def grad(self, xxx_todo_changeme103, xxx_todo_changeme104): + (x, y, p) = xxx_todo_changeme103 + (gz,) = xxx_todo_changeme104 rval = [ dot(p * gz, y), dot((p * gz).T, x), @@ -3386,7 +3491,9 @@ out[0] = theano._asarray(rval, dtype=node.outputs[0].dtype) - def grad(self, (x, y), (gz,)): + def grad(self, xxx_todo_changeme105, xxx_todo_changeme106): + (x, y) = xxx_todo_changeme105 + (gz,) = xxx_todo_changeme106 assert _is_sparse_variable(x) or _is_sparse_variable(y) rval = [] @@ -3481,7 +3588,9 @@ [tensor.tensor(dtype=dtype_out, broadcastable=(False, False))]) - def perform(self, node, (alpha, x, y, z), (out, )): + def perform(self, node, xxx_todo_changeme107, xxx_todo_changeme108): + (alpha, x, y, z) = xxx_todo_changeme107 + (out, ) = xxx_todo_changeme108 x_is_sparse = _is_sparse(x) y_is_sparse = _is_sparse(y) --- ./theano/scan_module/tests/test_scan.py (original) +++ ./theano/scan_module/tests/test_scan.py (refactored) @@ -4,7 +4,7 @@ import time import unittest -import cPickle +import pickle import numpy from nose.plugins.skip import SkipTest from numpy.testing import dec @@ -89,7 +89,7 @@ f_x = f(*pt) gx = [] # now iterate over the elements of x and call f on those + delta x - for i in xrange(len(pt)): + for i in range(len(pt)): if ndarray_mask[i]: # It is a ndarray that we can tweak if eps: @@ -99,7 +99,7 @@ if pt[i].ndim: _g = [] # it has several dimensions: - for pos in xrange(prod(pt[i].shape)): + for pos in range(prod(pt[i].shape)): t = pt[i].copy() t = t.flatten() t[pos] += _eps @@ -123,7 +123,7 @@ """Return the biggest relative error between g_pt and self.gx""" g_pt = [] - for i in xrange(len(_g_pt)): + for i in range(len(_g_pt)): if self.ndarray_mask[i]: g_pt.append(_g_pt[i]) elif isinstance(_g_pt[i], numpy.ndarray): @@ -237,12 +237,12 @@ f_out = open('tmp_scan_test_pickle.pkl', 
'wb') try: - cPickle.dump(_my_f, f_out, protocol=-1) + pickle.dump(_my_f, f_out, protocol=-1) finally: f_out.close() f_in = open('tmp_scan_test_pickle.pkl', 'rb') try: - my_f = cPickle.load(f_in) + my_f = pickle.load(f_in) finally: f_in.close() finally: @@ -256,7 +256,7 @@ steps = 5 numpy_values = numpy.array([state * (2 ** (k + 1)) for k - in xrange(steps)]) + in range(steps)]) theano_values = my_f(state, steps) utt.assert_allclose(numpy_values, theano_values) @@ -285,7 +285,7 @@ steps = 5 numpy_values = numpy.array([state * (2 ** (k + 1)) for k - in xrange(steps)]) + in range(steps)]) theano_values = my_f(state, steps) utt.assert_allclose(numpy_values, theano_values) @@ -356,7 +356,7 @@ # compute the output in numpy v_out = numpy.zeros((4,)) v_out[0] = v_u[0] * W_in + v_x0 * W - for step in xrange(1, 4): + for step in range(1, 4): v_out[step] = v_u[step] * W_in + v_out[step - 1] * W theano_values = f2(v_u, v_x0, W_in, W) utt.assert_allclose(theano_values, v_out) @@ -410,7 +410,7 @@ # compute the output in numpy v_out = numpy.zeros((4,)) v_out[0] = v_u[0] * W_in + v_x0 * W - for step in xrange(1, 4): + for step in range(1, 4): v_out[step] = v_u[step] * W_in + v_out[step - 1] * W theano_values = f2(v_u, v_x0, W_in, W) utt.assert_allclose(theano_values, v_out) @@ -480,7 +480,7 @@ # compute the output in numpy v_out = numpy.zeros((4,)) v_out[0] = v_u[0] * W_in + v_x0 * W - for step in xrange(1, 4): + for step in range(1, 4): v_out[step] = v_u[step] * W_in + v_out[step - 1] * W theano_values = f2(v_u, v_x0, W_in, W) utt.assert_allclose(theano_values, v_out) @@ -547,7 +547,7 @@ v_out2 = numpy.zeros((4,), dtype='int64') v_out1[0] = v_u[0] * W_in + v_x0 * W v_out2[0] = v_u[0] + v_x0 - for step in xrange(1, 4): + for step in range(1, 4): v_out1[step] = v_u[step] * W_in + v_out1[step - 1] * W v_out2[step] = numpy.int64(v_u[step] + v_out1[step - 1]) @@ -592,7 +592,7 @@ # compute the output i numpy v_out = numpy.zeros((4,)) v_out[0] = v_u[0] * W_in.get_value() + v_x0 * W.get_value() - for step in xrange(1, 4): + for step in range(1, 4): v_out[step] = v_u[step] * W_in.get_value() + \ v_out[step - 1] * W.get_value() @@ -644,7 +644,7 @@ v_x[0] = numpy.dot(v_u1[0], vW_in1) + v_u2[0] * vW_in2 + \ numpy.dot(v_x0, vW) v_y[0] = numpy.dot(v_x0, vWout) - for i in xrange(1, 3): + for i in range(1, 3): v_x[i] = numpy.dot(v_u1[i], vW_in1) + v_u2[i] * vW_in2 + \ numpy.dot(v_x[i - 1], vW) v_y[i] = numpy.dot(v_x[i - 1], vWout) @@ -881,14 +881,14 @@ allow_input_downcast=True) scan_node = [x for x in f9.maker.fgraph.toposort() if isinstance(x.op, theano.scan_module.scan_op.Scan)] - assert 0 in scan_node[0].op.destroy_map.keys() - assert 1 in scan_node[0].op.destroy_map.keys() + assert 0 in list(scan_node[0].op.destroy_map.keys()) + assert 1 in list(scan_node[0].op.destroy_map.keys()) # compute output in numpy numpy_x0 = numpy.zeros((3,)) numpy_x1 = numpy.zeros((3,)) numpy_x0[0] = vu0[0] * vW_in + vx0 * vW + vu1[0] * vu2[0] numpy_x1[0] = vu0[0] * vW_in + vx1 * vW + vu1[0] + vu2[0] - for i in xrange(1, 3): + for i in range(1, 3): numpy_x0[i] = vu0[i] * vW_in + numpy_x0[i - 1] * vW + \ vu1[i] * vu2[i] numpy_x1[i] = vu0[i] * vW_in + numpy_x1[i - 1] * vW + \ @@ -961,14 +961,14 @@ scan_node = [x for x in f9.maker.fgraph.toposort() if isinstance(x.op, theano.scan_module.scan_op.Scan)] - assert 0 in scan_node[0].op.destroy_map.keys() - assert 1 in scan_node[0].op.destroy_map.keys() + assert 0 in list(scan_node[0].op.destroy_map.keys()) + assert 1 in list(scan_node[0].op.destroy_map.keys()) # compute output in numpy numpy_x0 
= numpy.zeros((3,)) numpy_x1 = numpy.zeros((3,)) numpy_x0[0] = vu0[0] * vW_in + vx0 * vW + vu1[0] * vu1[1] numpy_x1[0] = vu0[0] * vW_in + vx1 * vW + vu2[0] + vu2[1] + vu2[2] - for i in xrange(1, 3): + for i in range(1, 3): numpy_x0[i] = vu0[i] * vW_in + numpy_x0[i - 1] * vW + \ vu1[i] * vu1[i + 1] numpy_x1[i] = vu0[i] * vW_in + numpy_x1[i - 1] * vW + \ @@ -1016,8 +1016,8 @@ mode=mode) scan_node = [x for x in f9.maker.fgraph.toposort() if isinstance(x.op, theano.scan_module.scan_op.Scan)] - assert 0 not in scan_node[0].op.destroy_map.keys() - assert 1 in scan_node[0].op.destroy_map.keys() + assert 0 not in list(scan_node[0].op.destroy_map.keys()) + assert 1 in list(scan_node[0].op.destroy_map.keys()) # Shared variable with updates def test_shared_arguments_with_updates(self): @@ -1103,7 +1103,7 @@ numpy_y1[0] = vy1 numpy_W1 = vW1.copy() numpy_W2 = vW2.copy() - for idx in xrange(3): + for idx in range(3): numpy_y0[idx + 3] = numpy.dot(\ numpy.dot(vu1[idx, :], numpy_W1), \ numpy_W2) + \ @@ -1184,7 +1184,7 @@ rng = numpy.random.RandomState(int(rng_seed)) # int() is for 32bit numpy_v = numpy.zeros((10, 2)) - for i in xrange(10): + for i in range(10): numpy_v[i] = rng.uniform(-1, 1, size=(2,)) theano_v = my_f() @@ -1392,7 +1392,7 @@ # compute the output in numpy v_out = numpy.zeros((4,)) v_out[0] = v_u[3] * W_in + v_x0 * W - for step in xrange(1, 4): + for step in range(1, 4): v_out[step] = v_u[3 - step] * W_in + v_out[step - 1] * W theano_values = f2(v_u, v_x0, W_in, W) @@ -2112,7 +2112,7 @@ v_x[0] = numpy.dot(v_u1[0], vW_in1) + v_u2[0] * vW_in2 + \ numpy.dot(v_x0, vW) v_y[0] = numpy.dot(v_x0, vWout) + v_y0[2] - for i in xrange(1, 3): + for i in range(1, 3): v_x[i] = numpy.dot(v_u1[i], vW_in1) + v_u2[i] * vW_in2 + \ numpy.dot(v_x[i - 1], vW) v_y[i] = numpy.dot(v_x[i - 1], vWout) + v_y[i - 1] @@ -2201,7 +2201,7 @@ numpy.dot(v_x0, vW) v_y[0] = numpy.dot(v_x0, vWout) + v_y0[2] - for i in xrange(1, 8): + for i in range(1, 8): v_x[i] = numpy.dot(v_u1[i], vW_in1) + v_u2[i] * vW_in2 + \ numpy.dot(v_x[i - 1], vW) v_y[i] = numpy.dot(v_x[i - 1], vWout) + v_y[i - 1] @@ -3166,7 +3166,7 @@ numpy.dot(v_x0, vW) v_y[0] = numpy.dot(v_x0, vWout) + v_y0[2] - for i in xrange(1, 8): + for i in range(1, 8): v_x[i] = numpy.dot(v_u1[i], vW_in1) + v_u2[i] * vW_in2 + \ numpy.dot(v_x[i - 1], vW) v_y[i] = numpy.dot(v_x[i - 1], vWout) + v_y[i - 1] @@ -3268,7 +3268,7 @@ n_steps=10, truncate_gradient=-1, go_backwards=False) - cost = updates.values()[0] + cost = list(updates.values())[0] g_sh = tensor.grad(cost, shared_var) fgrad = theano.function([], g_sh) assert fgrad() == 1 @@ -3712,10 +3712,10 @@ r = numpy.arange(10000).astype(theano.config.floatX).reshape(1000, 10) t0 = time.time() - for i in xrange(1, 1000): + for i in range(1, 1000): r[i] += r[i - 1] t1 = time.time() - print 'python', t1 - t0 + print('python', t1 - t0) r = numpy.arange(10000).astype(theano.config.floatX).reshape(1000, 10) t0 = time.time() @@ -3731,12 +3731,12 @@ else: while True: try: - tmp = r_i.next() - tmp += r_ii.next() + tmp = next(r_i) + tmp += next(r_ii) except StopIteration: break t1 = time.time() - print 'python with builtin iterator', t1 - t0 + print('pythonRefactoringTool: No changes to ./theano/scan_module/scan_views.py RefactoringTool: Refactored ./theano/scan_module/scan_utils.py with builtin iterator', t1 - t0) if 1: r = numpy.arange(10000).astype(theano.config.floatX).reshape(1000, 10) @@ -3751,7 +3751,7 @@ t2 = time.time() f(r) t3 = time.time() - print 'theano (scan, cvm)', t3 - t2 + print('theano (scan, cvm)', t3 - t2) if 1: r = 
numpy.arange(10000).astype(theano.config.floatX).reshape(-1, 10) @@ -3769,11 +3769,11 @@ f._check_for_aliased_inputs = False t2 = time.time() f_fn = f.fn - for i in xrange(998): + for i in range(998): f_fn() f() # 999 to update the profiling timers t3 = time.time() - print 'theano (updates, cvm)', t3 - t2 + print('theano (updates, cvm)', t3 - t2) #print shared_r.get_value() @@ -3804,10 +3804,10 @@ w = numpy.random.randn(N, N).astype(theano.config.floatX) t0 = time.time() - for i in xrange(1, L): + for i in range(1, L): r[i] = numpy.tanh(numpy.dot(r[i - 1], w)) t1 = time.time() - print 'python', t1 - t0 + print('python', t1 - t0) if 1: r = numpy.arange(L * N).astype(theano.config.floatX).reshape(L, N) @@ -3823,7 +3823,7 @@ t2 = time.time() f(r) t3 = time.time() - print 'theano (scan, cvm)', t3 - t2 + print('theano (scan, cvm)', t3 - t2) if 1: r = numpy.arange(L * N).astype(theano.config.floatX).reshape(L, N) @@ -3849,7 +3849,7 @@ f_fn(n_calls=L - 2) f() # 999 to update the profiling timers t3 = time.time() - print 'theano (updates, cvm)', t3 - t2 + print('theano (updates, cvm)', t3 - t2) #print shared_r.get_value() @@ -3883,10 +3883,10 @@ w = numpy.random.randn(N, N).astype(theano.config.floatX) t0 = time.time() - for i in xrange(1, L): + for i in range(1, L): r[i] = numpy.tanh(numpy.dot(r[i - 1], w)) t1 = time.time() - print 'python', t1 - t0 + print('python', t1 - t0) if 1: r = numpy.arange(B * L * N).astype( @@ -3914,12 +3914,12 @@ f_fn(n_calls=L - 2) f() # 999 to update the profiling timers t3 = time.time() - print 'theano (updates, cvm)', t3 - t2 + print('theano (updates, cvm)', t3 - t2) if __name__ == '__main__': #''' - print ' Use nosetests to run these tests ' + print(' Use nosetests to run these tests ') ''' scan_tst = T_Scan() #'' --- ./theano/scan_module/scan_utils.py (original) +++ ./theano/scan_module/scan_utils.py (refactored) @@ -15,7 +15,7 @@ import copy import logging -from itertools import izip + import numpy import warnings @@ -146,7 +146,7 @@ def hash_listsDictsTuples(x): hash_value = 0 if isinstance(x, dict): - for k, v in x.iteritems(): + for k, v in x.items(): hash_value ^= hash_listsDictsTuples(k) hash_value ^= hash_listsDictsTuples(v) elif isinstance(x, (list, tuple)): @@ -181,7 +181,7 @@ will always have the same value. 
""" if isinstance(replace, dict): - items = replace.items() + items = list(replace.items()) elif isinstance(replace, (list, tuple)): items = replace elif replace is None: @@ -266,14 +266,14 @@ if isinstance(x, list) or isinstance(x, tuple): iter_on = x elif isinstance(x, dict): - iter_on = x.iteritems() + iter_on = iter(x.items()) if iter_on is not None: - return all(filter(y) for y in iter_on) + return all(list(filter(y)) for y in iter_on) else: return (isinstance(x, theano.Variable) or isinstance(x, theano.scan_module.until)) - if not filter(ls): + if not list(filter(ls)): raise ValueError( 'The return value of your scan lambda expression may only be ' 'made of lists, tuples, or dictionaries containing Theano ' @@ -332,7 +332,7 @@ try: isNaN = numpy.isnan(x) isInf = numpy.isinf(x) - isStr = isinstance(x, basestring) + isStr = isinstance(x, str) except Exception: isNaN = False isInf = False @@ -345,7 +345,7 @@ except Exception: isNaN = False isInf = False - if isinstance(x, gof.Constant) and isinstance(x.data, basestring): + if isinstance(x, gof.Constant) and isinstance(x.data, str): isStr = True else: isStr = False @@ -360,7 +360,7 @@ # Corner case that I might use in an optimization if size == 0: return tensor_var - shapes = [tensor_var.shape[x] for x in xrange(tensor_var.ndim)] + shapes = [tensor_var.shape[x] for x in range(tensor_var.ndim)] zeros_shape = [size + shapes[0]] + shapes[1:] empty = tensor.zeros(zeros_shape, dtype=tensor_var.dtype) @@ -388,7 +388,7 @@ if in_ys is None: in_ys = [] - for x, y in izip(xs, ys): + for x, y in zip(xs, ys): if x.owner and not y.owner: return False if y.owner and not x.owner: @@ -398,7 +398,7 @@ return False if len(in_xs) != len(in_ys): return False - for _x, _y in izip(in_xs, in_ys): + for _x, _y in zip(in_xs, in_ys): if _x.type != _y.type: return False @@ -410,7 +410,7 @@ n_nodes = len(nds_x) cont = True idx = 0 - for dx, dy in izip(xs, ys): + for dx, dy in zip(xs, ys): if not dx.owner or not dy.owner: if dy.owner or dx.owner: return False @@ -435,7 +435,7 @@ elif len(nd_x.outputs) != len(nd_y.outputs): cont = False else: - for dx, dy in izip(nd_x.inputs, nd_y.inputs): + for dx, dy in zip(nd_x.inputs, nd_y.inputs): if (dx, dy) not in common: if dx != dy: if (isinstance(dx, tensor.Constant) and @@ -450,7 +450,7 @@ cont = False if cont: - for dx, dy in izip(nd_x.outputs, nd_y.outputs): + for dx, dy in zip(nd_x.outputs, nd_y.outputs): common.add((dx, dy)) idx += 1 @@ -469,7 +469,7 @@ # inside. We don't use the full ShapeFeature interface, but we # let it initialize itself with an empty fgraph, otherwise we will # need to do it manually - for inp, inp_shp in izip(inputs, input_shapes): + for inp, inp_shp in zip(inputs, input_shapes): if inp_shp is not None and len(inp_shp) != inp.ndim: assert len(inp_shp) == inp.ndim @@ -477,7 +477,7 @@ shape_feature.on_attach(theano.gof.FunctionGraph([], [])) # Initialize shape_of with the input shapes - for inp, inp_shp in izip(inputs, input_shapes): + for inp, inp_shp in zip(inputs, input_shapes): shape_feature.set_shape(inp, inp_shp) def local_traverse(out): @@ -533,8 +533,8 @@ # Mapping from invalid variables to equivalent valid ones. 
self.valid_equivalent = valid_equivalent.copy() - self.valid.update(valid_equivalent.values()) - self.invalid.update(valid_equivalent.keys()) + self.valid.update(list(valid_equivalent.values())) + self.invalid.update(list(valid_equivalent.keys())) def check(self, out): ''' @@ -605,8 +605,8 @@ n_ins = len(op.info['tap_array'][idx]) out_ins += [op.inputs[offset:offset + n_ins]] offset += n_ins - out_ins += [[] for k in xrange(op.n_nit_sot)] - out_ins += [[op.inputs[offset + k]] for k in xrange(op.n_shared_outs)] + out_ins += [[] for k in range(op.n_nit_sot)] + out_ins += [[op.inputs[offset + k]] for k in range(op.n_shared_outs)] added = True out_idxs_mask = [1 for idx in out_idxs] @@ -663,7 +663,7 @@ i_offset = op.n_seqs o_offset = 0 curr_pos = 0 - for idx in xrange(op.info['n_mit_mot']): + for idx in range(op.info['n_mit_mot']): if offset + idx not in not_required: map_old_new[offset + idx] = currRefactoringTool: No changes to ./theano/scan_module/scan_perform_ext.py RefactoringTool: Refactored ./theano/scan_module/scan_opt.py _pos curr_pos += 1 @@ -687,7 +687,7 @@ offset += op.n_mit_mot ni_offset += op.n_mit_mot - for idx in xrange(op.info['n_mit_sot']): + for idx in range(op.info['n_mit_sot']): if offset + idx not in not_required: map_old_new[offset + idx] = curr_pos curr_pos += 1 @@ -708,7 +708,7 @@ offset += op.n_mit_sot ni_offset += op.n_mit_sot - for idx in xrange(op.info['n_sit_sot']): + for idx in range(op.info['n_sit_sot']): if offset + idx not in not_required: map_old_new[offset + idx] = curr_pos curr_pos += 1 @@ -729,7 +729,7 @@ offset += op.n_sit_sot ni_offset += op.n_sit_sot nit_sot_ins = [] - for idx in xrange(op.info['n_nit_sot']): + for idx in range(op.info['n_nit_sot']): if offset + idx not in not_required: map_old_new[offset + idx] = curr_pos curr_pos += 1 @@ -742,7 +742,7 @@ offset += op.n_nit_sot shared_ins = [] - for idx in xrange(op.info['n_shared_outs']): + for idx in range(op.info['n_shared_outs']): if offset + idx not in not_required: map_old_new[offset + idx] = curr_pos curr_pos += 1 @@ -792,7 +792,7 @@ tag = '' nw_inputs = [safe_new(x, tag) for x in inputs] givens = OrderedDict() - for nw_x, x in izip(nw_inputs, inputs): + for nw_x, x in zip(nw_inputs, inputs): givens[x] = nw_x allinputs = theano.gof.graph.inputs(outputs) for inp in allinputs: --- ./theano/scan_module/scan_opt.py (original) +++ ./theano/scan_module/scan_opt.py (refactored) @@ -93,7 +93,7 @@ nw_outer = [node.inputs[0]] all_ins = gof.graph.inputs(op_outs) - for idx in xrange(op.n_seqs): + for idx in range(op.n_seqs): if (isinstance(node.inputs[idx + 1], tensor.TensorConstant) and node.inputs[idx + 1].tag.unique_value is not None): try: @@ -296,7 +296,7 @@ **dict(return_list=True))[0].owner fgraph.replace_all_validate_remove( - zip(node.outputs, nw_node.outputs), + list(zip(node.outputs, nw_node.outputs)), remove=[node], reason='scanOp_pushout_nonseqs_ops') return True @@ -307,7 +307,7 @@ if out in local_fgraph.outputs: x = node.outputs[local_fgraph.outputs.index(out)] y = replace_with_out[idx] - shape = [y.shape[idx] for idx in xrange(y.ndim)] + shape = [y.shape[idx] for idx in range(y.ndim)] replace_with[x] = tensor.alloc(y, node.inputs[0], *shape) @@ -317,7 +317,7 @@ # subtensor is applied that takes only the last element if replace_with: fgraph.replace_all_validate_remove( - replace_with.items(), + list(replace_with.items()), remove=[node], reason='scanOp_pushout_nonseqs_ops') @@ -495,7 +495,7 @@ **dict(return_list=True))[0].owner fgraph.replace_all_validate_remove( - zip(node.outputs, 
nw_node.outputs), + list(zip(node.outputs, nw_node.outputs)), remove=[node], reason='scanOp_pushout_seqs_ops') return True @@ -527,7 +527,7 @@ # We need to add one extra dimension to the outputs if replace_with and len(replace_with) == len(node.outputs): fgraph.replace_all_validate_remove( - replace_with.items(), + list(replace_with.items()), remove=[node], reason='scanOp_pushout_seqs_ops') return True @@ -552,13 +552,13 @@ scan_nodes = [x for x in nodes if (isinstance(x.op, scan_op.Scan) and x.op.info['gpu'] == self.gpu_flag)] - for scan_idx in xrange(len(scan_nodes)): + for scan_idx in range(len(scan_nodes)): node = scan_nodes[scan_idx] op = node.op n_outs = (op.info['n_mit_mot'] + op.info['n_mit_sot'] + op.info['n_sit_sot']) - for pos in xrange(n_outs): + for pos in range(n_outs): info = copy.deepcopy(op.info) if not 'destroy_map' in info: info['destroy_map'] = OrderedDict() @@ -572,7 +572,7 @@ ls_end += op.outer_nitsot(node.inputs) ls_end += op.outer_non_seqs(node.inputs) n_outs = len(ls) - for idx in xrange(n_outs): + for idx in range(n_outs): if ls[idx] in ls[:idx]: ls[idx] = deep_copy_op(ls[idx]) @@ -586,12 +586,12 @@ new_outs = new_op(*inputs, **dict(return_list=True)) try: fgraph.replace_all_validate_remove( - zip(node.outputs, new_outs), + list(zip(node.outputs, new_outs)), remove=[node], reason='scanOp_make_inplace') op = new_op node = new_outs[0].owner - except InconsistencyError, e: + except InconsistencyError as e: # Failed moving output to be comptued inplace pass @@ -646,9 +646,9 @@ op = node.op c_outs = op.n_mit_mot + op.n_mit_sot + op.n_sit_sot + op.n_nit_sot - init_l = [0 for x in xrange(op.n_mit_mot)] + init_l = [0 for x in range(op.n_mit_mot)] init_l += [abs(numpy.min(v)) for v in op.tap_array[op.n_mit_mot:]] - init_l += [0 for x in xrange(op.n_nit_sot)] + init_l += [0 for x in range(op.n_nit_sot)] # 2. 
Check the clients of each output and see for how many steps # does scan need to run @@ -689,7 +689,7 @@ # Note that for mit_mot outputs and shared outputs we can not change # the number of intermediate steps stored without affecting the # result of the op - store_steps = [0 for o in xrange(op.n_mit_mot)] + store_steps = [0 for o in range(op.n_mit_mot)] store_steps += [-1 for o in node.outputs[op.n_mit_mot:c_outs]] # Flag that says if an input has changed and we need to do something # or not @@ -769,15 +769,15 @@ if isinstance(stop, tensor.Variable): global_nsteps['sym'] += [stop] # not if it is maxsize - elif (type(stop) in (int, long) and + elif (type(stop) in (int, int) and stop == maxsize): global_nsteps = None # yes if it is a int k, 0 < k < maxsize - elif (type(stop) in (int, long) and + elif (type(stop) in (int, int) and global_nsteps['real'] < stop): global_nsteps['real'] = stop # yes if it is a int k, 0 < k < maxsize - elif (type(stop) in (int, long) and stop > 0): + elif (type(stop) in (int, int) and stop > 0): pass # not otherwise else: @@ -956,7 +956,7 @@ (inps, outs, info, node_ins, compress_map) = \ scan_utils.compress_outs(op, not_required, nw_inputs) inv_compress_map = OrderedDict() - for k, v in compress_map.items(): + for k, v in list(compress_map.items()): inv_compress_map[v] = k node_ins = [pre_greedy_local_optimizer(list_opt_slice, x) for x in @@ -1143,7 +1143,7 @@ # SitSot RefactoringTool: Refactored ./theano/scan_module/scan_op.py inner_ins[idx].append( rename(nd.op.inner_sitsot(nd.op.inputs), idx)) - info['tap_array'] += [[-1] for x in xrange(nd.op.n_sit_sot)] + info['tap_array'] += [[-1] for x in range(nd.op.n_sit_sot)] inner_outs[idx].append(nd.op.inner_sitsot_outs(nd.op.outputs)) outer_ins += rename(nd.op.outer_sitsot(nd.inputs), idx) outer_outs += nd.op.outer_sitsot_outs(nd.outputs) @@ -1239,7 +1239,7 @@ if not isinstance(new_outs, (list, tuple)): new_outs = [new_outs] - return zip(outer_outs, new_outs) + return list(zip(outer_outs, new_outs)) def belongs_to_set(self, node, set_nodes): """ @@ -1667,10 +1667,10 @@ new_out = tensor.dot(val, out_seq) pos = node.outputs.index(outer_out) - old_new = zip(node.outputs[:pos], new_outs[:pos]) + old_new = list(zip(node.outputs[:pos], new_outs[:pos])) old = node.outputs[pos].clients[0][0].outputs[0] old_new.append((old, new_out)) - old_new += zip(node.outputs[pos+1:], new_outs[pos:]) + old_new += list(zip(node.outputs[pos+1:], new_outs[pos:])) fgraph.replace_all_validate_remove( old_new, remove=[node], reason='scan_pushout_dot1') --- ./theano/scan_module/scan_op.py (original) +++ ./theano/scan_module/scan_op.py (refactored) @@ -15,7 +15,7 @@ import itertools import logging import time -from itertools import izip + import numpy @@ -170,7 +170,7 @@ # Pre-computing some values to speed up perform self.mintaps = [numpy.min(x) for x in self.tap_array] - self.mintaps += [0 for x in xrange(self.n_nit_sot)] + self.mintaps += [0 for x in range(self.n_nit_sot)] self.seqs_arg_offset = 1 + self.n_seqs self.shared_arg_offset = (self.seqs_arg_offset + self.n_mit_mot + @@ -278,7 +278,7 @@ self.outer_mitmot(inputs))): outer_mitmot = format(_outer_mitmot, as_var=inner_mitmot[ipos]) new_inputs.append(outer_mitmot) - for k in xrange(len(itaps)): + for k in range(len(itaps)): if (inner_mitmot[ipos + k].type.dtype != outer_mitmot.type.dtype or inner_mitmot[ipos + k].ndim != outer_mitmot.ndim - 1): @@ -293,7 +293,7 @@ k].type.dtype, inner_mitmot[ipos + k].type.ndim)) ipos += len(itaps) - for k in xrange(len(otaps)): + for k in range(len(otaps)): 
if (inner_mitmot_outs[opos + k].type.dtype != \ outer_mitmot.type.dtype or inner_mitmot_outs[opos + k].ndim != \ @@ -317,7 +317,7 @@ outer_mitsot = format(_outer_mitsot, as_var=inner_mitsots[ipos]) new_inputs.append(outer_mitsot) - for k in xrange(len(itaps)): + for k in range(len(itaps)): if (inner_mitsots[ipos + k].type.dtype != \ outer_mitsot.type.dtype or inner_mitsots[ipos + k].ndim != outer_mitsot.ndim - 1): @@ -466,7 +466,7 @@ # If everything went OK up to here, there is still one thing to # check. Namely, do the internal graph represent same # computations - for self_in, other_in in izip(self.inputs, other.inputs): + for self_in, other_in in zip(self.inputs, other.inputs): if self_in.type != other_in.type: return False @@ -492,7 +492,7 @@ aux_txt = '%s' if getattr(self, 'destroy_map', None) is None: self.destroy_map = OrderedDict() - if len(self.destroy_map.keys()) > 0: + if len(list(self.destroy_map.keys())) > 0: # Check if all outputs are inplace if (sorted(self.destroy_map.keys()) == \ sorted(range(self.n_mit_mot + @@ -501,7 +501,7 @@ aux_txt += 'all_inplace,%s,%s}' else: aux_txt += '{inplace{' - for k in self.destroy_map.keys(): + for k in list(self.destroy_map.keys()): aux_txt += str(k) + ',' aux_txt += '},%s,%s}' else: @@ -558,9 +558,9 @@ wrapped_outputs += self.outputs[slices:] profile = None if (theano.config.profile or - (isinstance(self.profile, (basestring, bool, int)) + (isinstance(self.profile, (str, bool, int)) and self.profile)): - if isinstance(self.profile, basestring): + if isinstance(self.profile, str): profile = ScanProfileStats(name=self.profile) else: profile = ScanProfileStats(name=self.name) @@ -618,12 +618,12 @@ if hasattr(self, 'destroy_map'): cython_destroy_map = [x in self.destroy_map - for x in xrange(len(node.outputs))] - else: - cython_destroy_map = [0 for x in xrange(len(node.outputs))] + for x in range(len(node.outputs))] + else: + cython_destroy_map = [0 for x in range(len(node.outputs))] cython_destroy_map = numpy.asarray(cython_destroy_map, dtype='int32') - import scan_perform_ext + from . import scan_perform_ext p = lambda node, args, outs:\ scan_perform_ext.perform( self.n_shared_outs, @@ -872,11 +872,11 @@ ] pos = [(-self.mintaps[idx]) % store_steps[idx] for idx - in xrange(self.n_outs + self.n_nit_sot)] + in range(self.n_outs + self.n_nit_sot)] if not getattr(self, 'destroy_map', None): self.destroy_map = OrderedDict() # 2.1 Create storage space for outputs - for idx in xrange(self.n_outs): + for idx in range(self.n_outs): if idx in self.destroy_map: # ^ Case 1. Outputs should be computed inplace of their # initial state @@ -902,7 +902,7 @@ fn = self.fn.fn offset = (self.n_seqs + sum(map(len, self.tap_array[:self.n_outs])) + self.n_shared_outs) - for idx in xrange(len(other_args)): + for idx in range(len(other_args)): input_storage[idx + offset].storage[0] = other_args[idx] i = 0 @@ -912,7 +912,7 @@ while (i < n_steps) and cond: # sequences over which scan iterates # 3. 
collect input slices - for idx in xrange(self.n_seqs): + for idx in range(self.n_seqs): if self.vector_seqs[idx]: input_storage[idx].storage[0] = \ seqs[idx][i:i + 1].reshape(()) @@ -920,7 +920,7 @@ input_storage[idx].storage[0] = seqs[idx][i] offset = self.n_seqs - for idx in xrange(self.n_outs): + for idx in range(self.n_outs): if self.vector_outs[idx]: for tap in self.tap_array[idx]: _idx = (pos[idx] + tap) % store_steps[idx] @@ -936,21 +936,21 @@ a_offset = self.shared_arg_offset o_offset = self.n_outs + self.n_nit_sot if i == 0: - for j in xrange(self.n_shared_outs): + for j in range(self.n_shared_outs): input_storage[offset].storage[0] = args[a_offset + j] offset += 1 else: - for j in xrange(self.n_shared_outs): + for j in range(self.n_shared_outs): input_storage[offset].storage[0] = outs[o_offset + j][0] offset += 1 # 4. collecting slices where the output should be stored - for idx in xrange(self.n_mit_mot_outs): + for idx in range(self.n_mit_mot_outs): output_storage[idx].storage[0] = None offset = self.n_mit_mot_outs if i != 0 and self.n_nit_sot > 0: - for idx in xrange(self.n_outs + self.n_nit_sot - + for idx in range(self.n_outs + self.n_nit_sot - self.n_mit_mot): if (store_steps[idx + self.n_mit_mot] == 1 or self.vector_outs[idx + self.n_mit_mot]): @@ -960,12 +960,12 @@ output_storage[idx + offset].storage[0] =\ outs[_pos0][0][pos[_pos0]] else: - for idx in xrange(self.n_outs + self.n_nit_sot - + for idx in range(self.n_outs + self.n_nit_sot - self.n_mit_mot): output_storage[idx + offset].storage[0] = None offset += self.n_outs + self.n_nit_sot - self.n_mit_mot - for idx in xrange(self.n_shared_outs): + for idx in range(self.n_shared_outs): output_storage[idx + offset].storage[0] = None # If condition add it to the mix if self.as_while: @@ -1000,7 +1000,7 @@ t_fn += dt_fn offset_out = 0 # 5.1 Copy over the values for mit_mot outputs - for j in xrange(self.n_mit_mot): + for j in range(self.n_mit_mot): for k in self.mit_mot_out_slices[j]: outs[j][0][k + pos[j]] = \ output_storage[offset_out].storage[0] @@ -1011,7 +1011,7 @@ end = self.n_outs offset_out -= self.n_mit_mot - for j in xrange(begin, end): + for j in range(begin, end): if (store_steps[j] == 1 or self.vector_outs[j] or outs[j][0][pos[j]] is not output_storage[offset_out + j].storage[0]): @@ -1021,7 +1021,7 @@ # 5.3 Copy over the values for nit_sot outputs begin = end end += self.n_nit_sot - for j in xrange(begin, end): + for j in range(begin, end): if i == 0: jout = j + offset_out shape = (store_steps[j],) + \ @@ -1047,18 +1047,18 @@ # variables begin = end end += self.n_shared_outs - for j in xrange(begin, end): + for j in range(begin, end): jout = j + offset_out outs[j][0] = output_storage[jout].storage[0] pos = [(idx + 1) % store for idx, store in - itertools.izip(pos, store_steps)] + zip(pos, store_steps)] i = i + 1 # 6. 
Check if you need to re-order output buffers begin = self.n_mit_mot end = self.n_outs + self.n_nit_sot - for idx in xrange(begin, end): + for idx in range(begin, end): if (store_steps[idx] < i - self.mintaps[idx] and pos[idx] < store_steps[idx]): @@ -1140,7 +1140,7 @@ # Here, we build a list inner_ins_shape, such that inner_ins_shape[i] # is the shape of self.inputs[i] - for inp, inp_shp in izip(node.inputs, input_shapes): + for inp, inp_shp in zip(node.inputs, input_shapes): assert inp_shp is None or len(inp_shp) == inp.type.ndim # sequences @@ -1151,13 +1151,13 @@ # mit_mot, mit_sot, sit_sot n_outs = self.n_mit_mot + self.n_mit_sot + self.n_sit_sot outs_shape = [] - for idx in xrange(n_outs): + for idx in range(n_outs): for k in self.tap_array[idx]: outs_shape += [input_shapes[idx + self.n_seqs + 1][1:]] # shared_outs offset = 1 + self.n_seqs + n_outs - for idx in xrange(self.n_shared_outs): + for idx in range(self.n_shared_outs): outs_shape += [input_shapes[idx + offset]] # non_sequences @@ -1169,7 +1169,7 @@ # node.inputs inner_non_sequences = self.inputs[len(seqs_shape) + len(outs_shape):] out_equivalent = OrderedDict() - for in_ns, out_ns in izip(inner_non_sequences, node.inputs[offset:]): + for in_ns, out_ns in zip(inner_non_sequences, node.inputs[offset:]): out_equivalent[in_ns] = out_ns if self.as_while: self_outs = self.outputs[:-1] @@ -1191,7 +1191,7 @@ scan_outs = [x for x in input_shapes[offset:offset + n_outs]] offset += n_outs outs_shape_n = self.n_mit_mot_outs + self.n_mit_sot + self.n_sit_sot - for x in xrange(self.n_nit_sot): + for x in range(self.n_nit_sot): out_shape_x = outs_shape[outs_shape_n + x] if out_shape_x is None: # This output is not a tensor, and has no shape @@ -1203,7 +1203,7 @@ r = node.outputs[n_outs + x] assert r.ndim == 1 + len(out_shape_x) shp = [node.inputs[offset + self.n_shared_outs + x]] - for i, shp_i in izip(xrange(1, r.ndim), out_shape_x): + for i, shp_i in zip(range(1, r.ndim), out_shape_x): # Validate shp_i. v_shape_i is either None (if invalid), # or a (variable, Boolean) tuple. The Boolean indicates # whether variable is shp_i (if True), or an valid @@ -1225,7 +1225,7 @@ # leading dimension so we replace it for every entry with Shape_i if self.as_while: scan_outs = [(Shape_i(0)(o),) + x[1:] - for o, x in izip(node.outputs, scan_outs)] + for o, x in zip(node.outputs, scan_outs)] return scan_outs def get_input_pos(self, output_index): @@ -1317,7 +1317,7 @@ e = len(self.mitmot_out_taps()[0]) else: e = 1 - for p in xrange(oidx): + for p in range(oidx): s = e if p < self.n_mit_mot: e += len(self.mitmot_out_taps()[p]) @@ -1339,7 +1339,7 @@ node.inputs[iidx + 1]) return [self.inner_non_seqs(self.inputs)[loc_idx]] - for p in xrange(iidx): + for p in range(iidx): s = e if p < self.n_seqs: e += 1 @@ -1385,12 +1385,12 @@ # input to `z_t` then `x` is an input to `z_t`. 
n_outs = len(node.outputs) - for steps in xrange(n_outs): - for iidx in xrange(n_outs): - for jidx in xrange(n_outs): + for steps in range(n_outs): + for iidx in range(n_outs): + for jidx in range(n_outs): j_inp_idx = self.get_input_pos(jidx) + 1 if connection_pattern[j_inp_idx][iidx] == True: - for k in xrange(len(connection_pattern)): + for k in range(len(connection_pattern)): if connection_pattern[k][jidx]: connection_pattern[k][iidx] = True return connection_pattern @@ -1483,7 +1483,7 @@ consider_constant=wrt, disconnected_inputs='ignore', return_disconnected='None') - gmp = dict(zip(wrt, grads)) + gmp = dict(list(zip(wrt, grads))) rval = [gmp.get(p, None) for p in diff_inputs] return rval dC_dinps_t = [None for inp in diff_inputs] @@ -1525,13 +1525,13 @@ dC_dXt = safe_new(dC_douts[idx][0]) dC_dXts.append(dC_dXt) _dC_dinps_t = compute_gradient(Xt, dC_dXt) - for jdx in xrange(len(_dC_dinps_t)): + for jdx in range(len(_dC_dinps_t)): if dC_dinps_t[jdx] is None: dC_dinps_t[jdx] = _dC_dinps_t[jdx] elif _dC_dinps_t[jdx]: dC_dinps_t[jdx] += _dC_dinps_t[jdx] # mask inputs that get no gradients - for dx in xrange(len(dC_dinps_t)): + for dx in range(len(dC_dinps_t)): if not dC_dinps_t[dx]: dC_dinps_t[dx] = tensor.zeros_like(diff_inputs[dx]) else: @@ -1561,7 +1561,7 @@ # Construct scan op # Seqs outer_inp_seqs = [x[::-1] for x in inputs[1:1 + self.n_seqs]] - for idx in xrange(self.n_mit_mot + self.n_mit_sot): + for idx in range(self.n_mit_mot + self.n_mit_sot): mintap = numpy.min(self.tap_array[idx]) maxtap = numpy.max(self.tap_array[idx]) if idx < self.n_mit_mot: @@ -1626,7 +1626,7 @@ n_mitmot_outs = 0 n_mitmot_inps = 0 - for idx in xrange(self.n_mit_mot): + for idx in range(self.n_mit_mot): if isinstance(dC_douts[idx].type, DisconnectedType): out = outs[idx] outer_inp_mitmot.append(tensor.zeros_like(out)) @@ -1636,13 +1636,13 @@ mitmot_out_taps.append([]) undefined = False disconnected = True - for jdx in xrange(len(self.mit_mot_out_slices[idx])): + for jdx in range(len(self.mit_mot_out_slices[idx])): inner_inp_mitmot.append(dC_dXts[out_pos]) mitmot_inp_taps[idx].append(-self.mit_mot_out_slices[idx][jdx]) n_mitmot_inps += 1 out_pos += 1 - for jdx in xrange(len(self.tap_array[idx])): + for jdx in range(len(self.tap_array[idx])): inner_inp_mitmot.append(dC_dXtm1s[ins_pos - self.n_seqs]) inner_out_mitmot.append(dC_dinps_t[ins_pos]) if not disconnected_dC_dinps_t[ins_pos]: @@ -1665,7 +1665,7 @@ type_outs.append('connected') offset = self.n_mit_mot - for idx in xrange(self.n_mit_sot): + for idx in range(self.n_mit_sot): mitmot_inp_taps.append([]) mitmot_out_taps.append([]) outer_inp_mitmot.append(dC_douts[idx + offset][::-1]) @@ -1676,7 +1676,7 @@ undefined = False disconnected = True mitmot_inp_taps[idx + offset].append(0) - for jdx in xrange(len(self.tap_array[idx_tap])): + for jdx in range(len(self.tap_array[idx_tap])): inner_inp_mitmot.append(dC_dXtm1s[ins_pos - self.n_seqs]) inner_out_mitmot.append(dC_dinps_t[ins_pos]) mitmot_inp_taps[idx + offset].append( @@ -1699,7 +1699,7 @@ type_outs.append('connected') offset += self.n_mit_sot - for idx in xrange(self.n_sit_sot): + for idx in range(self.n_sit_sot): mitmot_inp_taps.append([0, 1]) mitmot_out_taps.append([1]) undefined = False @@ -1759,14 +1759,14 @@ inner_inp_sitsot = dC_dXtm1s[ins_pos - self.n_seqs:] outer_inp_sitsot = [ tensor.zeros([grad_steps + 1] + - [x.shape[i] for i in xrange(x.ndim)], + [x.shape[i] for i in range(x.ndim)], dtype=y.dtype) for y, x in zip(inner_inp_sitsot, self.outer_non_seqs(inputs))] n_sitsot_outs = 
len(outer_inp_sitsot) new_tap_array = mitmot_inp_taps + [[-1] for k in - xrange(n_sitsot_outs)] + range(n_sitsot_outs)] info = OrderedDict() info['n_seqs'] = len(outer_inp_seqs) @@ -1793,7 +1793,7 @@ outer_inp_seqs + outer_inp_mitmot + outer_inp_sitsot + - [inputs[0] for x in xrange(n_nit_sot)] + + [inputs[0] for x in range(n_nit_sot)] + RefactoringTool: Refactored ./theano/scan_module/scan.py self.outer_shared(inputs) + self.outer_non_seqs(inputs)) @@ -1846,7 +1846,7 @@ start = len(gradients) node = outs[0].owner - for idx in xrange(self.n_shared_outs): + for idx in range(self.n_shared_outs): disconnected = True connected_flags = self.connection_pattern(node)[idx + start] for dC_dout, connected in zip(dC_douts, connected_flags): @@ -1862,7 +1862,7 @@ start = len(gradients) gradients += [DisconnectedType()() - for x in xrange(self.n_nit_sot)] + for x in range(self.n_nit_sot)] begin = end end = begin + n_sitsot_outs @@ -1884,9 +1884,9 @@ # from a computational point of view # The gradients of scan are computed replacing Disconnected with 0, # because through the recurrence they can become nonzero - for idx in xrange(len(gradients)): + for idx in range(len(gradients)): disconnected = True - for kdx in xrange(len(node.outputs)): + for kdx in range(len(node.outputs)): if connection_pattern[idx][kdx] and \ not isinstance(dC_douts[kdx].type, DisconnectedType): disconnected = False @@ -2119,16 +2119,16 @@ other_time): # Scan overhead profile if any([isinstance(node.op, Scan) and v > 0 for (_, node), v in - apply_time.items()]): - print - print 'Scan overhead:' + list(apply_time.items())]): + print() + print('Scan overhead:') print (' ') total_super_scan_time = 0 total_scan_fct_time = 0 total_scan_op_time = 0 - for (_, node), v in apply_time.items(): + for (_, node), v in list(apply_time.items()): if isinstance(node.op, Scan): if v > 0: scan_fct_time = node.op.mode_instance.fn_time @@ -2136,18 +2136,18 @@ total_super_scan_time += v total_scan_fct_time += scan_fct_time total_scan_op_time += scan_op_time - print ' %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%' % ( + print(' %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%' % ( v, scan_fct_time, scan_op_time, scan_fct_time / v * 100, - scan_op_time / v * 100), node + scan_op_time / v * 100), node) else: - print (' The node took 0s, so we can not ' - 'compute the overhead'), node - print ' total %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%' % ( + print((' The node took 0s, so we can not ' + 'compute the overhead'), node) + print(' total %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%' % ( total_super_scan_time, total_scan_fct_time, total_scan_op_time, total_scan_fct_time / total_super_scan_time * 100, - total_scan_op_time / total_super_scan_time * 100) + total_scan_op_time / total_super_scan_time * 100)) --- ./theano/scan_module/scan.py (original) +++ ./theano/scan_module/scan.py (refactored) @@ -379,7 +379,7 @@ return_steps = OrderedDict() # wrap sequences in a dictionary if they are not already dictionaries - for i in xrange(n_seqs): + for i in range(n_seqs): if not isinstance(seqs[i], dict): seqs[i] = OrderedDict([('input', seqs[i]), ('taps', [0])]) elif seqs[i].get('taps', None): @@ -389,7 +389,7 @@ seqs[i]['taps'] = [0] # wrap outputs info in a dictionary if they are not already in one - for i in xrange(n_outs): + for i in range(n_outs): if outs_info[i] is not None: if isinstance(outs_info[i], dict): # DEPRECATED : @@ -469,7 +469,7 @@ try: nw_slice.tag.test_value = gof.Op._get_test_value( _seq_val_slice) - except AttributeError, e: + except AttributeError as e: if 
config.compute_test_value != 'ignore': # No need to print a warning or raise an error now, # it will be done when fn will be called. @@ -599,7 +599,7 @@ if config.compute_test_value != 'off': try: arg.tag.test_value = gof.Op._get_test_value(actual_arg) - except AttributeError, e: + except AttributeError as e: if config.compute_test_value != 'ignore': # No need to print a warning or raise an error now, # it will be done when fn will be called. @@ -659,7 +659,7 @@ try: nw_slice.tag.test_value = gof.Op._get_test_value( _init_out_var_slice) - except AttributeError, e: + except AttributeError as e: if config.compute_test_value != 'ignore': # No need to print a warning or raise an error now, # it will be done when fn will be called. @@ -687,9 +687,9 @@ max_mit_sot = numpy.max([-1] + mit_sot_rightOrder) + 1 max_sit_sot = numpy.max([-1] + sit_sot_rightOrder) + 1 n_elems = numpy.max([max_mit_sot, max_sit_sot]) - _ordered_args = [[] for x in xrange(n_elems)] + _ordered_args = [[] for x in range(n_elems)] offset = 0 - for idx in xrange(n_mit_sot): + for idx in range(n_mit_sot): n_inputs = len(mit_sot_tap_array[idx]) if n_fixed_steps in [1, -1]: _ordered_args[mit_sot_rightOrder[idx]] = \ @@ -699,7 +699,7 @@ mit_sot_inner_inputs[offset:offset + n_inputs] offset += n_inputs - for idx in xrange(n_sit_sot): + for idx in range(n_sit_sot): if n_fixed_steps in [1, -1]: _ordered_args[sit_sot_rightOrder[idx]] = \ [sit_sot_inner_slices[idx]] @@ -778,9 +778,9 @@ # as non sequences at the end of our args fake_nonseqs = [x.type() for x in non_seqs] fake_outputs = scan_utils.clone(outputs, - replace=OrderedDict(zip(non_seqs, - fake_nonseqs))) - all_inputs = itertools.ifilter( + replace=OrderedDict(list(zip(non_seqs, + fake_nonseqs)))) + all_inputs = filter( lambda x: (isinstance(x, gof.Variable) and not isinstance(x, SharedVariable) and not isinstance(x, gof.Constant)), @@ -825,7 +825,7 @@ n_outs = len(dummy_f.maker.outputs) if as_while: n_outs = n_outs - 1 - outs_info = [OrderedDict() for x in xrange(n_outs)] + outs_info = [OrderedDict() for x in range(n_outs)] ## Step 5.1 Outputs with taps different then -1 @@ -902,7 +902,7 @@ if (not isinstance(arg, SharedVariable) and not isinstance(arg, tensor.Constant))] - givens.update(OrderedDict(zip(other_scan_args, other_inner_args))) + givens.update(OrderedDict(list(zip(other_scan_args, other_inner_args)))) other_shared_scan_args = [arg.variable for arg in dummy_f.maker.expanded_inputs if (isinstance(arg.variable, SharedVariable) and @@ -911,8 +911,8 @@ in dummy_f.maker.expanded_inputs if (isinstance(arg.variable, SharedVariable) and RefactoringTool: No changes to ./theano/scan_module/__init__.py RefactoringTool: No changes to ./theano/scalar/tests/test_basic_sympy.py RefactoringTool: No changes to ./theano/scalar/tests/test_basic.py RefactoringTool: Refactored ./theano/scalar/sharedvar.py RefactoringTool: No changes to ./theano/scalar/basic_sympy.py RefactoringTool: Refactored ./theano/scalar/basic_scipy.py RefactoringTool: Refactored ./theano/scalar/basic.py not arg.update)] - givens.update(OrderedDict(zip(other_shared_scan_args, - other_shared_inner_args))) + givens.update(OrderedDict(list(zip(other_shared_scan_args, + other_shared_inner_args)))) ## ### Step 6. 
Re-order the outputs and clone them replacing things @@ -945,7 +945,7 @@ # variables are put on GPU right aways >:| , new_givens = OrderedDict() - for w, w_copy in givens.iteritems(): + for w, w_copy in givens.items(): if (isinstance(w.type, cuda.CudaNdarrayType) and isinstance(w_copy.type, tensor.TensorType)): for o in inner_outs: @@ -961,7 +961,7 @@ ### Step 7. Create the Scan Op ## - tap_array = mit_sot_tap_array + [[-1] for x in xrange(n_sit_sot)] + tap_array = mit_sot_tap_array + [[-1] for x in range(n_sit_sot)] info = OrderedDict() info['tap_array'] = tap_array @@ -991,7 +991,7 @@ mit_sot_scan_inputs + sit_sot_scan_inputs + shared_scan_inputs + - [actual_n_steps for x in xrange(n_nit_sot)] + + [actual_n_steps for x in range(n_nit_sot)] + other_shared_scan_args + other_scan_args) @@ -1037,7 +1037,7 @@ offsets) offset += n_mit_sot - offsets = [1 for x in xrange(n_sit_sot)] + offsets = [1 for x in range(n_sit_sot)] sit_sot_outs = remove_dimensions( scan_outs[offset:offset + n_sit_sot], sit_sot_return_steps, --- ./theano/scalar/sharedvar.py (original) +++ ./theano/scalar/sharedvar.py (refactored) @@ -23,7 +23,7 @@ import numpy from theano.compile import SharedVariable -from basic import Scalar, _scalar_py_operators +from .basic import Scalar, _scalar_py_operators class ScalarSharedVariable(_scalar_py_operators, SharedVariable): pass --- ./theano/scalar/basic_scipy.py (original) +++ ./theano/scalar/basic_scipy.py (refactored) @@ -171,7 +171,9 @@ else: super(Gamma, self).impl(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme, xxx_todo_changeme1): + (x, ) = xxx_todo_changeme + (gz, ) = xxx_todo_changeme1 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -182,7 +184,9 @@ return gz * gamma(x) * psi(x), - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme2, xxx_todo_changeme3, sub): + (x, ) = xxx_todo_changeme2 + (z, ) = xxx_todo_changeme3 if node.inputs[0].type in float_types: return """%(z)s = tgamma(%(x)s);""" % locals() raise NotImplementedError('only floating point is implemented') --- ./theano/scalar/basic.py (original) +++ ./theano/scalar/basic.py (refactored) @@ -15,7 +15,8 @@ import math import warnings from copy import copy -from itertools import imap +import collections + from textwrap import dedent import numpy @@ -150,7 +151,7 @@ raise TypeError('Value cannot accurately be converted to dtype' ' (%s) and allow_downcast is not True' % self.dtype) - except Exception, e: + except Exception as e: raise TypeError("Could not convert %s (value=%s) to %s" % ( type(data), data, self.dtype), e) @@ -789,7 +790,7 @@ def __init__(self, output_types_preference=None, name=None): self.name = name if output_types_preference is not None: - if not callable(output_types_preference): + if not isinstance(output_types_preference, collections.Callable): raise TypeError( "Expected a callable for the 'output_types_preference' argument to %s. 
(got: %s)" % self.__class__, output_types_preference) @@ -854,7 +855,7 @@ if hasattr(self, 'name') and self.name: return self.name else: - param = [(k, v) for k, v in self.__dict__.items() + param = [(k, v) for k, v in list(self.__dict__.items()) if k not in ["name", "_op_use_c_code"]] if param: return "%s{%s}" % (self.__class__.__name__, @@ -887,7 +888,9 @@ amd_float32 = None amd_float64 = None - def c_code_contiguous(self, node, name, (x, ), (z, ), sub): + def c_code_contiguous(self, node, name, xxx_todo_changeme, xxx_todo_changeme1, sub): + (x, ) = xxx_todo_changeme + (z, ) = xxx_todo_changeme1 if (not theano.config.lib.amdlibm or # We compare the dtype AND the broadcast flag # as this function do not broadcast @@ -960,7 +963,9 @@ # built-in < don't support complex return numpy.less(x, y) - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme2, xxx_todo_changeme3, sub): + (x, y) = xxx_todo_changeme2 + (z, ) = xxx_todo_changeme3 if node.inputs[0].type in complex_types: raise NotImplementedError() return "%(z)s = (%(x)s < %(y)s);" % locals() @@ -976,7 +981,9 @@ # built-in > don't support complex return numpy.greater(x, y) - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme4, xxx_todo_changeme5, sub): + (x, y) = xxx_todo_changeme4 + (z, ) = xxx_todo_changeme5 if node.inputs[0].type in complex_types: raise NotImplementedError() return "%(z)s = (%(x)s > %(y)s);" % locals() @@ -992,7 +999,9 @@ # built-in <= don't support complex return numpy.less_equal(x, y) - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme6, xxx_todo_changeme7, sub): + (x, y) = xxx_todo_changeme6 + (z, ) = xxx_todo_changeme7 if node.inputs[0].type in complex_types: raise NotImplementedError() return "%(z)s = (%(x)s <= %(y)s);" % locals() @@ -1008,7 +1017,9 @@ # built-in >= don't support complex return numpy.greater_equal(x, y) - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme8, xxx_todo_changeme9, sub): + (x, y) = xxx_todo_changeme8 + (z, ) = xxx_todo_changeme9 if node.inputs[0].type in complex_types: raise NotImplementedError() return "%(z)s = (%(x)s >= %(y)s);" % locals() @@ -1023,7 +1034,9 @@ def impl(self, x, y): return x == y - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme10, xxx_todo_changeme11, sub): + (x, y) = xxx_todo_changeme10 + (z, ) = xxx_todo_changeme11 if node.inputs[0].type in complex_types: raise NotImplementedError() return "%(z)s = (%(x)s == %(y)s);" % locals() @@ -1038,7 +1051,9 @@ def impl(self, x, y): return x != y - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme12, xxx_todo_changeme13, sub): + (x, y) = xxx_todo_changeme12 + (z, ) = xxx_todo_changeme13 if node.inputs[0].type in complex_types: raise NotImplementedError() return "%(z)s = (%(x)s != %(y)s);" % locals() @@ -1049,7 +1064,9 @@ def impl(self, x): return numpy.isnan(x) - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme14, xxx_todo_changeme15, sub): + (x, ) = xxx_todo_changeme14 + (z, ) = xxx_todo_changeme15 if node.inputs[0].type in complex_types: raise NotImplementedError() return "%(z)s = isnan(%(x)s);" % locals() @@ -1060,7 +1077,9 @@ def impl(self, x): return numpy.isinf(x) - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme16, xxx_todo_changeme17, 
sub): + (x, ) = xxx_todo_changeme16 + (z, ) = xxx_todo_changeme17 if node.inputs[0].type in complex_types: raise NotImplementedError() # Note that the C isinf returns -1 for -Inf and +1 for +Inf, while @@ -1088,7 +1107,9 @@ return False return True - def c_code(self, node, name, (x, low, hi), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme18, xxx_todo_changeme19, sub): + (x, low, hi) = xxx_todo_changeme18 + (z, ) = xxx_todo_changeme19 if self.openlow: cmp1 = '>' else: @@ -1107,7 +1128,9 @@ return ("%(z)s = %(x)s %(cmp1)s %(low)s &&" " %(x)s %(cmp2)s %(hi)s;" % locals()) - def grad(self, (x, low, hi), (gz, )): + def grad(self, xxx_todo_changeme20, xxx_todo_changeme21): + (x, low, hi) = xxx_todo_changeme20 + (gz, ) = xxx_todo_changeme21 return None, None, None inopenrange = InRange(True, True) inclosedrange = InRange(False, False) @@ -1124,10 +1147,14 @@ #backport #return ift if cond else iff - def c_code(self, node, name, (cond, ift, iff), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme22, xxx_todo_changeme23, sub): + (cond, ift, iff) = xxx_todo_changeme22 + (z, ) = xxx_todo_changeme23 return "%(z)s = %(cond)s ? %(ift)s : %(iff)s;" % locals() - def grad(self, (cond, ift, iff), (gz, )): + def grad(self, xxx_todo_changeme24, xxx_todo_changeme25): + (cond, ift, iff) = xxx_todo_changeme24 + (gz, ) = xxx_todo_changeme25 first_part = switch(cond, gz, 0.) second_part = switch(cond, 0., gz) @@ -1143,7 +1170,8 @@ return (condition_grad, first_part, second_part) - def output_types(self, (cond_t, ift_t, iff_t)): + def output_types(self, xxx_todo_changeme26): + (cond_t, ift_t, iff_t) = xxx_todo_changeme26 return upcast_out(ift_t, iff_t) switch = Switch() @@ -1187,7 +1215,9 @@ def impl(self, x, y): return x | y - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme27, xxx_todo_changeme28, sub): + (x, y) = xxx_todo_changeme27 + (z, ) = xxx_todo_changeme28 return "%(z)s = (%(x)s | %(y)s);" % locals() or_ = OR() @@ -1200,7 +1230,9 @@ def impl(self, x, y): return x ^ y - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme29, xxx_todo_changeme30, sub): + (x, y) = xxx_todo_changeme29 + (z, ) = xxx_todo_changeme30 return "%(z)s = (%(x)s ^ %(y)s);" % locals() xor = XOR() @@ -1213,7 +1245,9 @@ def impl(self, x, y): return x & y - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme31, xxx_todo_changeme32, sub): + (x, y) = xxx_todo_changeme31 + (z, ) = xxx_todo_changeme32 return "%(z)s = (%(x)s & %(y)s);" % locals() and_ = AND() @@ -1222,7 +1256,9 @@ def impl(self, x): return ~x - def c_code(self, node, name, (x,), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme33, xxx_todo_changeme34, sub): + (x,) = xxx_todo_changeme33 + (z, ) = xxx_todo_changeme34 return "%(z)s = (~%(x)s);" % locals() invert = Invert() @@ -1238,14 +1274,18 @@ # The built-in max function don't support complex type return numpy.maximum(*inputs) - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme35, xxx_todo_changeme36, sub): + (x, y) = xxx_todo_changeme35 + (z, ) = xxx_todo_changeme36 if any([i.type in complex_types for i in node.inputs]): raise NotImplementedError() # Test for both y>x and x>=y to detect NaN return ('%(z)s = ((%(y)s)>(%(x)s)? (%(y)s): ' '((%(x)s)>=(%(y)s)? 
(%(x)s): nan("")));' % locals()) - def grad(self, (x, y), (gz, )): + def grad(self, xxx_todo_changeme37, xxx_todo_changeme38): + (x, y) = xxx_todo_changeme37 + (gz, ) = xxx_todo_changeme38 if gz.type in complex_types: # max is currently defined for complex_types, # but the gradient for complex is not. @@ -1272,13 +1312,17 @@ # The built-in min function don't support complex type return numpy.minimum(*inputs) - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme39, xxx_todo_changeme40, sub): + (x, y) = xxx_todo_changeme39 + (z, ) = xxx_todo_changeme40 if any([i.type in complex_types for i in node.inputs]): raise NotImplementedError() return ('%(z)s = ((%(y)s)<(%(x)s)? (%(y)s): ' '((%(x)s)<=(%(y)s)? (%(x)s): nan("")));' % locals()) - def grad(self, (x, y), (gz, )): + def grad(self, xxx_todo_changeme41, xxx_todo_changeme42): + (x, y) = xxx_todo_changeme41 + (gz, ) = xxx_todo_changeme42 if gz.type in complex_types: # min is currently defined for complex_types, # but the gradient for complex is not. @@ -1302,13 +1346,15 @@ def impl(self, *inputs): return sum(inputs) - def c_code(self, node, name, inputs, (z, ), sub): + def c_code(self, node, name, inputs, xxx_todo_changeme43, sub): + (z, ) = xxx_todo_changeme43 if not inputs: return z + " = 0;" else: return z + " = " + " + ".join(inputs) + ";" - def grad(self, inputs, (gz, )): + def grad(self, inputs, xxx_todo_changeme44): + (gz, ) = xxx_todo_changeme44 if gz.type in complex_types: raise NotImplementedError() if self(*inputs).type in discrete_types: @@ -1338,13 +1384,15 @@ def impl(self, *inputs): return numpy.product(inputs) - def c_code(self, node, name, inputs, (z, ), sub): + def c_code(self, node, name, inputs, xxx_todo_changeme45, sub): + (z, ) = xxx_todo_changeme45 if not inputs: return z + " = 1;" else: return z + " = " + " * ".join(inputs) + ";" - def grad(self, inputs, (gz, )): + def grad(self, inputs, xxx_todo_changeme46): + (gz, ) = xxx_todo_changeme46 retval = [] # The following 3 lines verify that gz is complex when the @@ -1386,10 +1434,14 @@ def impl(self, x, y): return x - y - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme47, xxx_todo_changeme48, sub): + (x, y) = xxx_todo_changeme47 + (z, ) = xxx_todo_changeme48 return "%(z)s = %(x)s - %(y)s;" % locals() - def grad(self, (x, y), (gz, )): + def grad(self, xxx_todo_changeme49, xxx_todo_changeme50): + (x, y) = xxx_todo_changeme49 + (gz, ) = xxx_todo_changeme50 if gz.type in complex_types: raise NotImplementedError() @@ -1466,8 +1518,10 @@ else: return x / y - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme51, xxx_todo_changeme52, sub): # we generate good c code only when both are complex! + (x, y) = xxx_todo_changeme51 + (z, ) = xxx_todo_changeme52 if sum([node.inputs[0].type in complex_types, node.inputs[1].type in complex_types]) == 1: raise NotImplementedError('type not supported', type) @@ -1476,8 +1530,10 @@ return "%(z)s = ((double)%(x)s) / %(y)s;" % locals() return "%(z)s = %(x)s / %(y)s;" % locals() - def grad(self, (x, y), (gz, )): - + def grad(self, xxx_todo_changeme53, xxx_todo_changeme54): + + (x, y) = xxx_todo_changeme53 + (gz, ) = xxx_todo_changeme54 if x.type in complex_types: raise NotImplementedError() @@ -1516,16 +1572,18 @@ # of string formatting. 
return "#define THEANO_MACRO_MOD(x,y) (x % y)" - def c_code(self, node, name, (x, y), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme55, xxx_todo_changeme56, sub): + (x, y) = xxx_todo_changeme55 + (z,) = xxx_todo_changeme56 t = node.inputs[0].type.upcast(*[i.type for i in node.inputs[1:]]) - if t in imap(str, discrete_types): + if t in map(str, discrete_types): x_div_y_pp = '(%(x)s / %(y)s)' % locals() x_div_y_mp = '((-%(x)s) / %(y)s)' % locals() x_mod_y_mp = 'THEANO_MACRO_MOD((-%(x)s), %(y)s)' % locals() x_div_y_pm = '(%(x)s / (-%(y)s))' % locals() x_mod_y_pm = 'THEANO_MACRO_MOD(%(x)s, (-%(y)s))' % locals() x_div_y_mm = '((-%(x)s) / (-%(y)s))' % locals() - elif t in imap(str, float_types): + elif t in map(str, float_types): # We need to call different functions of math.h # depending on the type if t == 'float32': @@ -1604,38 +1662,38 @@ # of string formatting. return "#define THEANO_MACRO_MOD(x,y) (x % y)" - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme57, xxx_todo_changeme58, sub): """ We want the result to have the same sign as python, not the other implementation of mod. """ - # raise NotImplementedError("Unlike Python, C's modulo returns negative - # modulo on negative dividend (to implement)") + (x, y) = xxx_todo_changeme57 + (z, ) = xxx_todo_changeme58 t = node.inputs[0].type.upcast(*[i.type for i in node.inputs[1:]]) - if (str(t) in imap(str, discrete_types) or + if (str(t) in map(str, discrete_types) or t in ['uint8', 'int8', 'uint16', 'int16'] or t in ['uint32', 'int32', 'uint64', 'int64'] or t in discrete_types): # The above or's should not be needed anymore. However, for now we # keep them out of safety, and verify they are useless with an # assert. - assert str(t) in imap(str, discrete_types) + assert str(t) in map(str, discrete_types) x_mod_y = "THEANO_MACRO_MOD(%(x)s, %(y)s)" % locals() x_mod_ymm = "THEANO_MACRO_MOD(-%(x)s, -%(y)s)" % locals() x_mod_ypm = "THEANO_MACRO_MOD(%(x)s, -%(y)s)" % locals() x_mod_ymp = "THEANO_MACRO_MOD(-%(x)s, %(y)s)" % locals() - elif (str(t) in imap(str, float_types) or + elif (str(t) in map(str, float_types) or t in ['float32', 'float64'] or t in float_types): # The above or's should not be needed anymore. However, for now we # keep them out of safety, and verify they are useless with an # assert. 
- assert str(t) in imap(str, float_types) + assert str(t) in map(str, float_types) x_mod_y = "fmod(%(x)s,%(y)s)" % locals() x_mod_ymm = "fmod(-%(x)s,-%(y)s)" % locals() x_mod_ypm = "fmod(%(x)s,-%(y)s)" % locals() x_mod_ymp = "fmod(-%(x)s,%(y)s)" % locals() - elif str(t) in imap(str, complex_types): + elif str(t) in map(str, complex_types): raise self.complex_error else: raise NotImplementedError('type not supported', t) @@ -1654,7 +1712,9 @@ } """) % locals() - def grad(self, (x, y), (gz, )): + def grad(self, xxx_todo_changeme59, xxx_todo_changeme60): + (x, y) = xxx_todo_changeme59 + (gz, ) = xxx_todo_changeme60 return [x.zeros_like(dtype=theano.config.floatX), y.zeros_like(dtype=theano.config.floatX)] mod = Mod(upcast_out, name='mod') @@ -1664,13 +1724,17 @@ def impl(self, x, y): return x ** y - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme61, xxx_todo_changeme62, sub): + (x, y) = xxx_todo_changeme61 + (z, ) = xxx_todo_changeme62 if (node.inputs[0].type in complex_types or node.inputs[1].type in complex_types): raise NotImplementedError('type not supported', type) return "%(z)s = pow(%(x)s, %(y)s);" % locals() - def grad(self, (x, y), (gz, )): + def grad(self, xxx_todo_changeme63, xxx_todo_changeme64): + (x, y) = xxx_todo_changeme63 + (gz, ) = xxx_todo_changeme64 if gz.type in complex_types: raise NotImplementedError() @@ -1684,7 +1748,9 @@ return (first_part, second_part) - def c_code_contiguous(self, node, name, (x, y), (z, ), sub): + def c_code_contiguous(self, node, name, xxx_todo_changeme65, xxx_todo_changeme66, sub): + (x, y) = xxx_todo_changeme65 + (z, ) = xxx_todo_changeme66 if not theano.config.lib.amdlibm: raise theano.gof.utils.MethodNotDefined() @@ -1738,10 +1804,14 @@ else: return x - def c_code(self, node, name, (x, min, max), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme67, xxx_todo_changeme68, sub): + (x, min, max) = xxx_todo_changeme67 + (z, ) = xxx_todo_changeme68 return "%(z)s = %(x)s < %(min)s ? %(min)s : %(x)s > %(max)s ? 
%(max)s : %(x)s;" % locals() - def grad(self, (x, mn, mx), (gz, )): + def grad(self, xxx_todo_changeme69, xxx_todo_changeme70): + (x, mn, mx) = xxx_todo_changeme69 + (gz, ) = xxx_todo_changeme70 assert gz.type not in complex_types gx = ((x > mn) & (x < mx)) * gz gmn = (x < mn) * gz @@ -1754,7 +1824,7 @@ return v.zeros_like().astype(config.floatX) return v - return map(handle_int, [gx, gmn, gmx]) + return list(map(handle_int, [gx, gmn, gmx])) # Don't allow complex even if numpy do # As there is no mathematical reason for this function on complex @@ -1765,7 +1835,9 @@ def impl(self, x, y): return y - def c_code(self, node, name, (x, y), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme71, xxx_todo_changeme72, sub): + (x, y) = xxx_todo_changeme71 + (z, ) = xxx_todo_changeme72 return "%(z)s = %(y)s;" % locals() def connection_pattern(self, node): @@ -1775,8 +1847,10 @@ return [[False], [True]] - def grad(self, (x, y), (gz, )): - + def grad(self, xxx_todo_changeme73, xxx_todo_changeme74): + + (x, y) = xxx_todo_changeme73 + (gz, ) = xxx_todo_changeme74 if y.type in continuous_types: # x is disconnected because the elements of x are not used return DisconnectedType()(), gz @@ -1794,10 +1868,14 @@ def impl(self, input): return input - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme75, xxx_todo_changeme76, sub): + (x, ) = xxx_todo_changeme75 + (z, ) = xxx_todo_changeme76 return "%(z)s = %(x)s;" % locals() - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme77, xxx_todo_changeme78): + (x, ) = xxx_todo_changeme77 + (gz, ) = xxx_todo_changeme78 if x.type in continuous_types: return gz, else: @@ -1820,10 +1898,14 @@ def impl(self, input): return self.ctor(input) - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme79, xxx_todo_changeme80, sub): + (x, ) = xxx_todo_changeme79 + (z, ) = xxx_todo_changeme80 return "%s = (%s)%s;" % (z, node.outputs[0].type.dtype_specs()[1], x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme81, xxx_todo_changeme82): + (x, ) = xxx_todo_changeme81 + (gz, ) = xxx_todo_changeme82 if self.o_type in continuous_types: return [gz] else: @@ -1893,7 +1975,9 @@ def impl(self, x): return numpy.abs(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme83, xxx_todo_changeme84): + (x, ) = xxx_todo_changeme83 + (gz, ) = xxx_todo_changeme84 if self(x).type in discrete_types: if x.type in discrete_types: return [x.zeros_like(dtype=theano.config.floatX)] @@ -1902,7 +1986,9 @@ return gz * x / abs(x), # formula works for complex and real - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme85, xxx_todo_changeme86, sub): + (x, ) = xxx_todo_changeme85 + (z, ) = xxx_todo_changeme86 type = node.inputs[0].type if type in int_types: return "%(z)s = abs(%(x)s);" % locals() @@ -1919,8 +2005,10 @@ #casting to output type is handled by filter return numpy.sign(x) - def grad(self, (x, ), (gz, )): - + def grad(self, xxx_todo_changeme87, xxx_todo_changeme88): + + (x, ) = xxx_todo_changeme87 + (gz, ) = xxx_todo_changeme88 rval = x.zeros_like() if rval.type.dtype in discrete_types: @@ -1928,9 +2016,11 @@ return [rval] - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme89, xxx_todo_changeme90, sub): #casting is done by compiler #TODO: use copysign + (x, ) = xxx_todo_changeme89 + (z, ) = xxx_todo_changeme90 type = node.inputs[0].type if type in float_types: return 
"%(z)s = (%(x)s >= 0) ? (%(x)s == 0) ? 0.0 : 1.0 : -1.0;" % locals() @@ -1951,7 +2041,9 @@ def impl(self, x): return numpy.ceil(x) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme91, xxx_todo_changeme92): + (x,) = xxx_todo_changeme91 + (gz,) = xxx_todo_changeme92 rval = x.zeros_like() if rval.type.dtype in discrete_types: @@ -1959,7 +2051,9 @@ return [rval] - def c_code(self, node, name, (x,), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme93, xxx_todo_changeme94, sub): + (x,) = xxx_todo_changeme93 + (z,) = xxx_todo_changeme94 return "%(z)s = ceil(%(x)s);" % locals() ceil = Ceil(same_out_nocomplex, name='ceil') @@ -1968,7 +2062,9 @@ def impl(self, x): return numpy.floor(x) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme95, xxx_todo_changeme96): + (x,) = xxx_todo_changeme95 + (gz,) = xxx_todo_changeme96 rval = x.zeros_like() if rval.type.dtype in discrete_types: @@ -1976,7 +2072,9 @@ return [rval] - def c_code(self, node, name, (x,), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme97, xxx_todo_changeme98, sub): + (x,) = xxx_todo_changeme97 + (z,) = xxx_todo_changeme98 return "%(z)s = floor(%(x)s);" % locals() floor = Floor(same_out_nocomplex, name='floor') @@ -1985,10 +2083,14 @@ def impl(self, x): return numpy.trunc(x) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme99, xxx_todo_changeme100): + (x,) = xxx_todo_changeme99 + (gz,) = xxx_todo_changeme100 return [x.zeros_like().astype(theano.config.floatX)] - def c_code(self, node, name, (x,), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme101, xxx_todo_changeme102, sub): + (x,) = xxx_todo_changeme101 + (z,) = xxx_todo_changeme102 return "%(z)s = %(x)s >= 0? floor(%(x)s): -floor(-%(x)s);" % locals() trunc = Trunc(same_out_nocomplex, name='trunc') @@ -2003,7 +2105,9 @@ def impl(self, x): return numpy.round(x) - def c_code___(self, node, name, (x, ), (z, ), sub): + def c_code___(self, node, name, xxx_todo_changeme103, xxx_todo_changeme104, sub): + (x, ) = xxx_todo_changeme103 + (z, ) = xxx_todo_changeme104 typ = node.outputs[0].type.dtype if not typ in ['float32', 'float64']: Exception("The output should be float32 or float64") @@ -2088,7 +2192,9 @@ return round_half_away_from_zero_vec(x) - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme105, xxx_todo_changeme106, sub): + (x, ) = xxx_todo_changeme105 + (z, ) = xxx_todo_changeme106 if node.outputs[0].type.dtype in ['float32', 'float64']: return "%(z)s = round(%(x)s);" % locals() else: @@ -2100,7 +2206,9 @@ def impl(self, x): return -x - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme107, xxx_todo_changeme108): + (x,) = xxx_todo_changeme107 + (gz,) = xxx_todo_changeme108 if self(x).type in discrete_types: if x.type in discrete_types: return [x.zeros_like(dtype=theano.config.floatX)] @@ -2109,7 +2217,9 @@ return -gz, - def c_code(self, node, name, (x,), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme109, xxx_todo_changeme110, sub): + (x,) = xxx_todo_changeme109 + (z,) = xxx_todo_changeme110 return "%(z)s = -%(x)s;" % locals() neg = Neg(same_out, name='neg') @@ -2119,7 +2229,9 @@ def impl(self, x): return 1.0 / x - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme111, xxx_todo_changeme112): + (x,) = xxx_todo_changeme111 + (gz,) = xxx_todo_changeme112 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2130,7 +2242,9 @@ return -gz / (x * x), - def c_code(self, node, name, (x,), 
(z,), sub): + def c_code(self, node, name, xxx_todo_changeme113, xxx_todo_changeme114, sub): + (x,) = xxx_todo_changeme113 + (z,) = xxx_todo_changeme114 return "%(z)s = 1.0 / %(x)s;" % locals() inv = Inv(upgrade_to_float, name='inv') @@ -2143,7 +2257,9 @@ def impl(self, x): return numpy.log(x) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme115, xxx_todo_changeme116): + (x,) = xxx_todo_changeme115 + (gz,) = xxx_todo_changeme116 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2154,10 +2270,12 @@ return gz / x, - def c_code(self, node, name, (x,), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme117, xxx_todo_changeme118, sub): #todo: the version using log2 seems to be very slightly faster # on some machines for some reason, check if it's worth switching #return "%(z)s = log2(%(x)s) * 0.69314718055994529;" % locals() + (x,) = xxx_todo_changeme117 + (z,) = xxx_todo_changeme118 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = log(%(x)s);" % locals() @@ -2172,7 +2290,9 @@ def impl(self, x): return numpy.log2(x) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme119, xxx_todo_changeme120): + (x,) = xxx_todo_changeme119 + (gz,) = xxx_todo_changeme120 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2183,7 +2303,9 @@ return gz / (x * math.log(2.0)), - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme121, xxx_todo_changeme122, sub): + (x, ) = xxx_todo_changeme121 + (z, ) = xxx_todo_changeme122 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = log2(%(x)s);" % locals() @@ -2198,7 +2320,9 @@ def impl(self, x): return numpy.log10(x) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme123, xxx_todo_changeme124): + (x,) = xxx_todo_changeme123 + (gz,) = xxx_todo_changeme124 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2209,7 +2333,9 @@ return gz / (x * numpy.log(10.0)), - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme125, xxx_todo_changeme126, sub): + (x, ) = xxx_todo_changeme125 + (z, ) = xxx_todo_changeme126 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = log10(%(x)s);" % locals() @@ -2221,7 +2347,9 @@ def impl(self, x): return numpy.log1p(x) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme127, xxx_todo_changeme128): + (x,) = xxx_todo_changeme127 + (gz,) = xxx_todo_changeme128 if gz.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2232,7 +2360,9 @@ return [gz / (1 + x)] - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme129, xxx_todo_changeme130, sub): + (x, ) = xxx_todo_changeme129 + (z, ) = xxx_todo_changeme130 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = log1p(%(x)s);" % locals() @@ -2246,7 +2376,9 @@ def impl(self, x): return numpy.exp(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme131, xxx_todo_changeme132): + (x, ) = xxx_todo_changeme131 + (gz, ) = xxx_todo_changeme132 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2257,7 +2389,9 @@ return gz * exp(x), - def c_code(self, node, name, 
(x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme133, xxx_todo_changeme134, sub): + (x, ) = xxx_todo_changeme133 + (z, ) = xxx_todo_changeme134 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = exp(%(x)s);" % locals() @@ -2268,7 +2402,9 @@ def impl(self, x): return numpy.exp2(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme135, xxx_todo_changeme136): + (x, ) = xxx_todo_changeme135 + (gz, ) = xxx_todo_changeme136 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2279,7 +2415,9 @@ return gz * exp2(x) * log(numpy.cast[x.type](2)), - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme137, xxx_todo_changeme138, sub): + (x, ) = xxx_todo_changeme137 + (z, ) = xxx_todo_changeme138 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = exp2(%(x)s);" % locals() @@ -2290,7 +2428,9 @@ def impl(self, x): return numpy.expm1(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme139, xxx_todo_changeme140): + (x, ) = xxx_todo_changeme139 + (gz, ) = xxx_todo_changeme140 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2301,7 +2441,9 @@ return gz * exp(x), - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme141, xxx_todo_changeme142, sub): + (x, ) = xxx_todo_changeme141 + (z, ) = xxx_todo_changeme142 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = exp(%(x)s) - 1;" % locals() @@ -2312,7 +2454,9 @@ def impl(self, x): return x * x - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme143, xxx_todo_changeme144): + (x, ) = xxx_todo_changeme143 + (gz, ) = xxx_todo_changeme144 if gz.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2323,7 +2467,9 @@ return gz * x * 2, - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme145, xxx_todo_changeme146, sub): + (x, ) = xxx_todo_changeme145 + (z, ) = xxx_todo_changeme146 return "%(z)s = %(x)s * %(x)s;" % locals() sqr = Sqr(same_out, name='sqr') @@ -2332,7 +2478,9 @@ def impl(self, x): return numpy.sqrt(x) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme147, xxx_todo_changeme148): + (x,) = xxx_todo_changeme147 + (gz,) = xxx_todo_changeme148 if gz.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2343,7 +2491,9 @@ return (gz * 0.5) / sqrt(x), - def c_code(self, node, name, (x,), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme149, xxx_todo_changeme150, sub): + (x,) = xxx_todo_changeme149 + (z,) = xxx_todo_changeme150 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = sqrt(%(x)s);" % locals() @@ -2354,7 +2504,9 @@ def impl(self, x): return numpy.deg2rad(x) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme151, xxx_todo_changeme152): + (x,) = xxx_todo_changeme151 + (gz,) = xxx_todo_changeme152 if gz.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2365,7 +2517,9 @@ return gz * numpy.asarray(numpy.pi / 180, gz.type), - def c_code(self, node, name, (x,), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme153, xxx_todo_changeme154, sub): + (x,) = xxx_todo_changeme153 + (z,) = 
xxx_todo_changeme154 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = %(x)s * (M_PI / 180.0);" % locals() @@ -2376,7 +2530,9 @@ def impl(self, x): return numpy.rad2deg(x) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme155, xxx_todo_changeme156): + (x,) = xxx_todo_changeme155 + (gz,) = xxx_todo_changeme156 if gz.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2387,7 +2543,9 @@ return gz * numpy.asarray(180. / numpy.pi, gz.type), - def c_code(self, node, name, (x,), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme157, xxx_todo_changeme158, sub): + (x,) = xxx_todo_changeme157 + (z,) = xxx_todo_changeme158 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = %(x)s * (180.0 / M_PI);" % locals() @@ -2401,7 +2559,9 @@ def impl(self, x): return numpy.cos(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme159, xxx_todo_changeme160): + (x, ) = xxx_todo_changeme159 + (gz, ) = xxx_todo_changeme160 if gz.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2412,7 +2572,9 @@ return -gz * sin(x), - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme161, xxx_todo_changeme162, sub): + (x, ) = xxx_todo_changeme161 + (z, ) = xxx_todo_changeme162 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = cos(%(x)s);" % locals() @@ -2423,7 +2585,9 @@ def impl(self, x): return numpy.arccos(x) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme163, xxx_todo_changeme164): + (x,) = xxx_todo_changeme163 + (gz,) = xxx_todo_changeme164 if gz.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2434,7 +2598,9 @@ return - gz / sqrt(numpy.cast[x.type](1) - sqr(x)), - def c_code(self, node, name, (x,), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme165, xxx_todo_changeme166, sub): + (x,) = xxx_todo_changeme165 + (z,) = xxx_todo_changeme166 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = acos(%(x)s);" % locals() @@ -2448,7 +2614,9 @@ def impl(self, x): return numpy.sin(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme167, xxx_todo_changeme168): + (x, ) = xxx_todo_changeme167 + (gz, ) = xxx_todo_changeme168 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2459,7 +2627,9 @@ return gz * cos(x), - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme169, xxx_todo_changeme170, sub): + (x, ) = xxx_todo_changeme169 + (z, ) = xxx_todo_changeme170 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = sin(%(x)s);" % locals() @@ -2470,7 +2640,9 @@ def impl(self, x): return numpy.arcsin(x) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme171, xxx_todo_changeme172): + (x,) = xxx_todo_changeme171 + (gz,) = xxx_todo_changeme172 if gz.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2481,7 +2653,9 @@ return gz / sqrt(numpy.cast[x.type](1) - sqr(x)), - def c_code(self, node, name, (x,), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme173, xxx_todo_changeme174, sub): + (x,) = xxx_todo_changeme173 + (z,) = xxx_todo_changeme174 if 
node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = asin(%(x)s);" % locals() @@ -2492,7 +2666,9 @@ def impl(self, x): return numpy.tan(x) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme175, xxx_todo_changeme176): + (x,) = xxx_todo_changeme175 + (gz,) = xxx_todo_changeme176 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2503,7 +2679,9 @@ return gz / sqr(cos(x)), - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme177, xxx_todo_changeme178, sub): + (x, ) = xxx_todo_changeme177 + (z, ) = xxx_todo_changeme178 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = tan(%(x)s);" % locals() @@ -2514,7 +2692,9 @@ def impl(self, x): return numpy.arctan(x) - def grad(self, (x,), (gz,)): + def grad(self, xxx_todo_changeme179, xxx_todo_changeme180): + (x,) = xxx_todo_changeme179 + (gz,) = xxx_todo_changeme180 if gz.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2525,7 +2705,9 @@ return gz / (numpy.cast[x.type](1) + sqr(x)), - def c_code(self, node, name, (x,), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme181, xxx_todo_changeme182, sub): + (x,) = xxx_todo_changeme181 + (z,) = xxx_todo_changeme182 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = atan(%(x)s);" % locals() @@ -2536,7 +2718,9 @@ def impl(self, y, x): return numpy.arctan2(y, x) - def grad(self, (y, x), (gz,)): + def grad(self, xxx_todo_changeme183, xxx_todo_changeme184): + (y, x) = xxx_todo_changeme183 + (gz,) = xxx_todo_changeme184 if gz.type in complex_types: raise NotImplementedError() else: @@ -2556,7 +2740,9 @@ return [gz * x / (sqr(x) + sqr(y)), gz * neg(y) / (sqr(x) + sqr(y))] - def c_code(self, node, name, (y, x), (z,), sub): + def c_code(self, node, name, xxx_todo_changeme185, xxx_todo_changeme186, sub): + (y, x) = xxx_todo_changeme185 + (z,) = xxx_todo_changeme186 if (node.inputs[0].type in complex_types or node.inputs[1].type in complex_types): raise NotImplementedError('type not supported', type) @@ -2571,7 +2757,9 @@ def impl(self, x): return numpy.cosh(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme187, xxx_todo_changeme188): + (x, ) = xxx_todo_changeme187 + (gz, ) = xxx_todo_changeme188 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2582,7 +2770,9 @@ return gz * sinh(x), - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme189, xxx_todo_changeme190, sub): + (x, ) = xxx_todo_changeme189 + (z, ) = xxx_todo_changeme190 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = cosh(%(x)s);" % locals() @@ -2593,7 +2783,9 @@ def impl(self, x): return numpy.arccosh(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme191, xxx_todo_changeme192): + (x, ) = xxx_todo_changeme191 + (gz, ) = xxx_todo_changeme192 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2604,7 +2796,9 @@ return gz / sqrt(sqr(x) - numpy.cast[x.type](1)), - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme193, xxx_todo_changeme194, sub): + (x, ) = xxx_todo_changeme193 + (z, ) = xxx_todo_changeme194 if node.inputs[0].type in complex_types: raise 
NotImplementedError('type not supported', type) return "%(z)s = acosh(%(x)s);" % locals() @@ -2618,7 +2812,9 @@ def impl(self, x): return numpy.sinh(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme195, xxx_todo_changeme196): + (x, ) = xxx_todo_changeme195 + (gz, ) = xxx_todo_changeme196 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2629,7 +2825,9 @@ return gz * cosh(x), - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme197, xxx_todo_changeme198, sub): + (x, ) = xxx_todo_changeme197 + (z, ) = xxx_todo_changeme198 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = sinh(%(x)s);" % locals() @@ -2640,7 +2838,9 @@ def impl(self, x): return numpy.arcsinh(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme199, xxx_todo_changeme200): + (x, ) = xxx_todo_changeme199 + (gz, ) = xxx_todo_changeme200 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2651,7 +2851,9 @@ return gz / sqrt(sqr(x) + numpy.cast[x.type](1)), - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme201, xxx_todo_changeme202, sub): + (x, ) = xxx_todo_changeme201 + (z, ) = xxx_todo_changeme202 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = asinh(%(x)s);" % locals() @@ -2666,7 +2868,9 @@ def impl(self, x): return numpy.tanh(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme203, xxx_todo_changeme204): + (x, ) = xxx_todo_changeme203 + (gz, ) = xxx_todo_changeme204 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2677,7 +2881,9 @@ return gz * (1 - sqr(tanh(x))), - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme205, xxx_todo_changeme206, sub): + (x, ) = xxx_todo_changeme205 + (z, ) = xxx_todo_changeme206 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = tanh(%(x)s);" % locals() @@ -2688,7 +2894,9 @@ def impl(self, x): return numpy.arctanh(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme207, xxx_todo_changeme208): + (x, ) = xxx_todo_changeme207 + (gz, ) = xxx_todo_changeme208 if x.type in complex_types: raise NotImplementedError() if self(x).type in discrete_types: @@ -2699,7 +2907,9 @@ return gz / (numpy.cast[x.type](1) - sqr(x)), - def c_code(self, node, name, (x, ), (z, ), sub): + def c_code(self, node, name, xxx_todo_changeme209, xxx_todo_changeme210, sub): + (x, ) = xxx_todo_changeme209 + (z, ) = xxx_todo_changeme210 if node.inputs[0].type in complex_types: raise NotImplementedError('type not supported', type) return "%(z)s = atanh(%(x)s);" % locals() @@ -2711,7 +2921,9 @@ def impl(self, x): return numpy.real(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme211, xxx_todo_changeme212): + (x, ) = xxx_todo_changeme211 + (gz, ) = xxx_todo_changeme212 return [complex(gz, 0)] real = Real(real_out, name='real') @@ -2721,7 +2933,9 @@ def impl(self, x): return numpy.imag(x) - def grad(self, (x, ), (gz, )): + def grad(self, xxx_todo_changeme213, xxx_todo_changeme214): + (x, ) = xxx_todo_changeme213 + (gz, ) = xxx_todo_changeme214 if x.type in complex_types: return [complex(0, gz)] elif x.type in float_types: @@ -2735,7 +2949,7 @@ def impl(self, x): return numpy.angle(x) - def 
grad(self, (c, ), (gtheta, )): + def grad(self, xxx_todo_changeme215, xxx_todo_changeme216): # y = x.imag # r = sqrt(y**2 + x.real**2) # g = y/r @@ -2746,6 +2960,8 @@ # else: # theta = -numpy.arcsin(g)+numpy.pi + (c, ) = xxx_todo_changeme215 + (gtheta, ) = xxx_todo_changeme216 x = real(c) y = imag(c) r = abs(c) @@ -2780,7 +2996,9 @@ def impl(self, x, y): return numpy.complex(x, y) - def grad(self, (x, y), (gz,)): + def grad(self, xxx_todo_changeme217, xxx_todo_changeme218): + (x, y) = xxx_todo_changeme217 + (gz,) = xxx_todo_changeme218 return [cast(real(gz), x.type.dtype), cast(imag(gz), y.type.dtype)] complex = Complex(name='complex') @@ -2807,7 +3025,9 @@ else: return numpy.complex128(numpy.complex(x, y)) - def grad(self, (r, theta), (gz,)): + def grad(self, xxx_todo_changeme219, xxx_todo_changeme220): + (r, theta) = xxx_todo_changeme219 + (gz,) = xxx_todo_changeme220 gr = gz * complex_from_polar(1, theta) gtheta = gz * complex_from_polar(r, -theta) return [gr, gtheta] @@ -2842,10 +3062,10 @@ def init_c_code(self): """Return the C code for this Composite Op. """ subd = dict( - zip(self.fgraph.inputs, - ["%%(i%i)s" % i for i in xrange(len(self.fgraph.inputs))]) - + zip(self.fgraph.outputs, - ["%%(o%i)s" % i for i in xrange(len(self.fgraph.outputs))])) + list(zip(self.fgraph.inputs, + ["%%(i%i)s" % i for i in range(len(self.fgraph.inputs))])) + + list(zip(self.fgraph.outputs, + ["%%(o%i)s" % i for i in range(len(self.fgraph.outputs))]))) for orphan in self.fgraph.variables: # fgraph.orphans: if orphan.owner is None and orphan not in self.fgraph.inputs: @@ -2970,7 +3190,7 @@ assert len(inputs) == self.nin res = theano.compile.rebuild_colRefactoringTool: Refactored ./theano/scalar/__init__.py RefactoringTool: Refactored ./theano/sandbox/theano_object.py RefactoringTool: Refactored ./theano/sandbox/test_theano_object.py lect_shared( self.outputs, - replace=dict(zip(self.inputs, inputs)), + replace=dict(list(zip(self.inputs, inputs))), rebuild_strict=False) # After rebuild_collect_shared, the Variable in inputs # are not necessarily in the graph represented by res. 
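The hunks above all apply the same handful of mechanical Python-2-to-3 rewrites. As an illustrative sketch only (not taken from the Theano sources or from this log; every name below is invented), this is roughly what those fixers produce: xrange becomes range, itertools.izip/imap/ifilter become the lazy built-ins zip/map/filter (with list() added wherever the old eager result was relied on, hence the dict(list(zip(...))) and list(...keys()) patterns), "except E, e" becomes "except E as e", the print statement becomes the print() function, and Python 2 tuple parameters are replaced by a placeholder argument that is unpacked inside the body.

# Illustrative sketch only; names are invented for the example.
import math

def scaled(values, factor):
    # Py2: "for i in xrange(len(values)):"  ->  range()
    return [values[i] * factor for i in range(len(values))]

def paired(keys, vals):
    # Py2: "itertools.izip(keys, vals)" -> zip(); zip()/map()/dict.keys()
    # are lazy in Py3, so the tool wraps them in list() or dict() wherever
    # the surrounding code expected a concrete container.
    return dict(zip(keys, vals))

def safe_log(x):
    # Py2: "except ValueError, e:" -> "except ValueError as e:",
    # and the print statement -> the print() function.
    try:
        return math.log(x)
    except ValueError as e:
        print('could not take log of', x, ':', e)
        return float('nan')

class TupleParams:
    # Py2 allowed tuple parameters, e.g. "def grad(self, (x,), (gz,)):".
    # That syntax is gone in Py3, so the fixer adds placeholder arguments
    # (the "xxx_todo_changeme" names seen above) and unpacks them in the
    # body, preserving behaviour at the cost of readability.
    def grad(self, xxx_todo_changeme, xxx_todo_changeme1):
        (x,) = xxx_todo_changeme
        (gz,) = xxx_todo_changeme1
        return gz * x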
@@ -2985,7 +3205,7 @@ storage[0] = impl(inputs) def impl(self, *inputs): - output_storage = [[None] for i in xrange(self.nout)] + output_storage = [[None] for i in range(self.nout)] self.perform(None, inputs, output_storage) return utils.to_return_values([storage[0] for storage in output_storage]) @@ -2994,10 +3214,10 @@ raise NotImplementedError("grad is not implemented for Composite") def c_code(self, node, nodename, inames, onames, sub): - d = dict(zip(["i%i" % i for i in xrange(len(inames))], - inames) + - zip(["o%i" % i for i in xrange(len(onames))], - onames), + d = dict(list(zip(["i%i" % i for i in range(len(inames))], + inames)) + + list(zip(["o%i" % i for i in range(len(onames))], + onames)), **sub) d['nodename'] = nodename if not 'id' in sub: --- ./theano/scalar/__init__.py (original) +++ ./theano/scalar/__init__.py (refactored) @@ -1,4 +1,4 @@ -from basic import * +from .basic import * -from basic_scipy import * +from .basic_scipy import * --- ./theano/sandbox/theano_object.py (original) +++ ./theano/sandbox/theano_object.py (refactored) @@ -168,10 +168,10 @@ if key not in cache: inputs = [a() for a in args_types] - print 'compiling', fn, 'for inputs', inputs + print('compiling', fn, 'for inputs', inputs) rval = fn(o_self, *inputs) - print 'compiling to compute outputs', rval.outputs + print('compiling to compute outputs', rval.outputs) if isinstance(rval.outputs, (tuple, list)): all_required_inputs = theano.gof.graph.inputs(rval.outputs) --- ./theano/sandbox/test_theano_object.py (original) +++ ./theano/sandbox/test_theano_object.py (refactored) @@ -1,11 +1,11 @@ -from theano_object import * +from .theano_object import * RUN_TESTS = False def run(TF): def deco(f): if TF and RUN_TESTS: - print 'running test', f.__name__ + print('running test', f.__name__) f() if RUN_TESTS: return f @@ -65,39 +65,39 @@ @run(False) def test_misc_prints(): MM = MyModule() - print MM - print 'add', MM.add(4) - print 'b', MM.value(MM.b) - print 'sub', MM.sub(45) - print 'b', MM.value(MM.b) - print MM.sub(23) - print MM.add(9) - print MM.add(19) - print 'b', MM.value(MM.b) - print 'a', MM.value(MM.a) + print(MM) + print('add', MM.add(4)) + print('b', MM.value(MM.b)) + print('sub', MM.sub(45)) + print('b', MM.value(MM.b)) + print(MM.sub(23)) + print(MM.add(9)) + print(MM.add(19)) + print('b', MM.value(MM.b)) + print('a', MM.value(MM.a)) MM.value_set(MM.a,6) MM.value_set(MM.b,6) - print MM.add(6) + print(MM.add(6)) try: MM.b = 5 - except Exception, e: - print e + except Exception as e: + print(e) MM.del_member(MM.b) try: - print 'b', MM.value(MM.b) - except Exception, e: - print e + print('b', MM.value(MM.b)) + except Exception as e: + print(e) MM.b = 'asdffd' try: - print 'b', MM.value(MM.b) - except Exception, e: - print e + print('b', MM.value(MM.b)) + except Exception as e: + print(e) try: - print 'b', MM.value(MM.b) - except Exception, e: - print 'E', e - print MM.b - print 'a', MM.value(MM.a) + print('b', MM.value(MM.b)) + except Exception as e: + print('E', e) + print(MM.b) + print('a'RefactoringTool: Refactored ./theano/sandbox/test_scan.py RefactoringTool: Refactored ./theano/sandbox/test_rng_mrg.py RefactoringTool: Refactored ./theano/sandbox/test_neighbours.py RefactoringTool: No changes to ./theano/sandbox/test_neighbourhoods.py RefactoringTool: Refactored ./theano/sandbox/test_multinomial.py RefactoringTool: Refactored ./theano/sandbox/symbolic_module.py , MM.value(MM.a)) --- ./theano/sandbox/test_scan.py (original) +++ ./theano/sandbox/test_scan.py (refactored) @@ -1,6 +1,6 @@ import 
theano import numpy -import scan +from . import scan def test_001(): --- ./theano/sandbox/test_rng_mrg.py (original) +++ ./theano/sandbox/test_rng_mrg.py (refactored) @@ -309,7 +309,7 @@ dt = 0.0 avg_var = 0.0 - for i in xrange(steps): + for i in range(steps): t0 = time.time() ival = f(*inputs) assert ival.shape == sample_size @@ -637,7 +637,7 @@ dt = 0.0 avg_pvals = numpy.zeros(target_pvals.shape, dtype=config.floatX) - for i in xrange(steps): + for i in range(steps): t0 = time.time() ival = f() assert ival.shape == sample_size @@ -646,13 +646,13 @@ avg_pvals += ival avg_pvals /= steps - print 'random?[:10]\n', numpy.asarray(f()[:10]) - print prefix, 'mean', avg_pvals + print('random?[:10]\n', numpy.asarray(f()[:10])) + print(prefix, 'mean', avg_pvals) # < mean_rtol, 'bad mean? %s %s' % (str(avg_pvals), str(target_pvals)) - print numpy.mean(abs(avg_pvals - target_pvals)) - print prefix, 'time', dt - print prefix, 'elements', steps * numpy.prod(target_pvals.shape) - print prefix, 'samples/sec', steps * numpy.prod(target_pvals.shape) / dt + print(numpy.mean(abs(avg_pvals - target_pvals))) + print(prefix, 'time', dt) + print(prefix, 'elements', steps * numpy.prod(target_pvals.shape)) + print(prefix, 'samples/sec', steps * numpy.prod(target_pvals.shape) / dt) def test_multinomial(): --- ./theano/sandbox/test_neighbours.py (original) +++ ./theano/sandbox/test_neighbours.py (refactored) @@ -6,7 +6,7 @@ from theano import shared, function from theano.gof.python25 import any import theano.tensor as T -from neighbours import images2neibs, neibs2images, Images2Neibs +from .neighbours import images2neibs, neibs2images, Images2Neibs from theano.tests import unittest_tools --- ./theano/sandbox/test_multinomial.py (original) +++ ./theano/sandbox/test_multinomial.py (refactored) @@ -4,7 +4,7 @@ import theano from theano import config, function, tensor -import multinomial +from . import multinomial from theano.compile.mode import get_default_mode, predefined_linkers from theano.gof.python25 import any import theano.sandbox.cuda as cuda --- ./theano/sandbox/symbolic_module.py (original) +++ ./theano/sandbox/symbolic_module.py (refactored) @@ -1,6 +1,7 @@ import copy, inspect import theano import theano.tensor as T +import collections #import klass @@ -21,28 +22,25 @@ return True return isinstance(v, theano.Variable) and not k.startswith('_') r = {} - for key, val in dct.items(): - if filter(key, val): + for key, val in list(dct.items()): + if list(filter(key, val)): r[key] = val return r build_graph_rval = cls.build_graph() if not isinstance(build_graph_rval, dict): raise TypeError('%s.build_graph did not return dictionary' % cls) dct = just_symbolic(build_graph_rval) - for key, val in dct.items(): + for key, val in list(dct.items()): #print ' adding class attribute', key if isinstance(val, theano.Variable) and val.name is None: val.name = key - if callable(val): + if isinstance(val, collections.Callable): setattr(cls, key, staticmethod(val)) else: setattr(cls, key, val) -class SymbolicModule(object): +class SymbolicModule(object, metaclass=InitGraph): #installs class attributes from build_graph after declaration - __metaclass__ = InitGraph - - #if we call this function, it will return a new SymbolicModule def __new__(self, **kwargs): class SymMod(SymbolicModule): @staticmethod @@ -88,7 +86,7 @@ # this has the effect of creating new storage for these arguments # The common storage doesn't get messed with. 
inputs = [In(path_locals.get(name,name)) for name in args] - inputs.extend([v for k,v in common_inputs.items() if k not in args]) + inputs.extend([v for k,v in list(common_inputs.items()) if k not in args]) outputs = f() #print 'inputs', inputs #print 'outputs', outputs @@ -116,12 +114,12 @@ for s in modwalker(path_locals, val): yield s elif isinstance(val, dict): - for s in modwalker(path_locals, val.values()): + for s in modwalker(path_locals, list(val.values())): yield s elif issymbolicmodule(val): for s in modwalker(val.__dict__, [v for k,v in sym_items(val)]): yield s - elif isinstance(val, (basestring, int, float)): + elif isinstance(val, (str, int, float)): pass elif isinstance(val, theano.Variable): pass @@ -140,7 +138,7 @@ if isinstance(val, theano.Variable) and (val.owner is None) and (val not in inputs): inputs[val] = theano.In(val, value=theano.gof.Container(val, ['a'])) - assert len(inputs) == len([v for v in inputs.items()]) + assert len(inputs) == len([v for v in list(inputs.items())]) #Locate all the functions to compile, and compile them compiled_functions = {} @@ -172,7 +170,7 @@ reflected[thing] = cmod for key, val in sym_items(thing): setattr(CMod, key, reflect(val)) - elif isinstance(thing, (basestring, int, float)): + elif isinstance(thing, (str, int, float)): reflected[thing] = thing elif isinstance(thing, theano.Variable): if thing.owner is None: @@ -181,7 +179,7 @@ def setter(s, v): inputs[thing].value.storage[0] = v p = property(getter, setter) - print p + print(p) reflected[thing] = p else: reflected[thing] = None #TODO: how to reflect derived resuls? @@ -254,7 +252,7 @@ y = T.dmatrix() layers = [] _x = x - for i in xrange(n_hid_layers): + for i in range(n_hid_layers): layers.append(Layer(x=_x)) _x = layers[-1].y classif = LR(x=_x) @@ -264,7 +262,7 @@ rval = classif.params() for l in layers: rval.extend(l.params()) - print [id(r) for r in rval] + print([id(r) for r in rval]) return rval if 0: @@ -277,12 +275,12 @@ return locals() nnet = compile(NNet) -print nnet -print nnet.params() -print nnet.params.__dict__['finder'][NNet.layers[0].w] +print(nnet) +print(nnet.params()) +print(nnet.params.__dict__['finder'][NNet.layers[0].w]) nnet.params[NNet.layers[0].w] = [[6]] -print nnet.params() -print nnet.params() +print(nnet.params()) +print(nnet.params()) if 0: def deco(f): @@ -290,20 +288,20 @@ def __call__(self, *args, **kwargs): #return another SymbolicModule built like self def dummy(*dargs, **dkwargs): - print 'args', args, dargs - print 'kwargs', kwargs, dkwargs + print('args', args, dargs) + print('kwargs', kwargs, dkwargs) return f(*args, **kwargs) return deco(dummy) locals_dict = f() - for key, val in locals_dict.items(): + for key, val in list(locals_dict.items()): if isinstance(val, theano.Variable): try: kres = klass.KlassMember(val) except Exception: kres = klass.KlassVariable(val) setattr(SymMod, key, kres) - elif callable(val) and getattr(val, '__is_symbolic'): + elif isinstance(val, collections.CallablRefactoringTool: Refactored ./theano/sandbox/solve.py RefactoringTool: No changes to ./theano/sandbox/softsign.py RefactoringTool: Refactored ./theano/sandbox/scan_module/tests/test_utils.py RefactoringTool: Refactored ./theano/sandbox/scan_module/tests/test_scan.py e) and getattr(val, '__is_symbolic'): setattr(SymMod, key, val) return SymMod() @@ -336,7 +334,7 @@ ): hid = T.tanh(T.dot(x, w) + b) if top_part: - print 'top_part', top_part, 'kwargs', kwargs + print('top_part', top_part, 'kwargs', kwargs) top = top_part(x=hid, **kwargs) # SymbolicModule def 
params(): return top.params() + [w, b] else: @@ -344,12 +342,12 @@ return just_symbolic(locals()) if 0: - print 'logistic_regression', logistic_regression - print 'tanh_layer', tanh_layer - print 'nnet1', nnet1 + print('logistic_regression', logistic_regression) + print('tanh_layer', tanh_layer) + print('nnet1', nnet1) nnet1 = tanh_layer(logistic_regression) nnet2 = tanh_layer(nnet1) - print 'nnet2', nnet2 + print('nnet2', nnet2) if 0: class SymbolicModule(object): --- ./theano/sandbox/solve.py (original) +++ ./theano/sandbox/solve.py (refactored) @@ -38,7 +38,7 @@ output, = out ret=scipy.linalg.solve(A,b) if ret.dtype != node.outputs[0].dtype: - print >> sys.stderr, "WARNING: Solve.perform() required cast." + print("WARNING: Solve.perform() required cast.", file=sys.stderr) ret = theano._asarray(ret, dtype=node.outputs[0].dtype) output[0]=ret @@ -55,7 +55,7 @@ def test0(self): A=self.rng.randn(5,5) - b=numpy.array(range(5),dtype=float) + b=numpy.array(list(range(5)),dtype=float) x=scipy.linalg.solve(A,b) Ax = numpy.dot(A,x) are = tensor.numeric_grad.abs_rel_err(Ax, b) --- ./theano/sandbox/scan_module/tests/test_utils.py (original) +++ ./theano/sandbox/scan_module/tests/test_utils.py (refactored) @@ -1,4 +1,4 @@ -import cPickle +import pickle import numpy import unittest @@ -62,7 +62,7 @@ f_x = f(*pt) gx = [] # now iterate over the elements of x and call f on those + delta x - for i in xrange(len(pt)): + for i in range(len(pt)): if ndarray_mask[i]: # It is a ndarray that we can tweak if eps: @@ -72,7 +72,7 @@ if pt[i].ndim: _g = [] # it has several dimensions: - for pos in xrange(prod(pt[i].shape)): + for pos in range(prod(pt[i].shape)): t = pt[i].copy() t = t.flatten() t[pos] += _eps @@ -96,7 +96,7 @@ """Return the biggest relative error between g_pt and self.gx""" g_pt = [] - for i in xrange(len(_g_pt)): + for i in range(len(_g_pt)): if self.ndarray_mask[i]: g_pt.append(_g_pt[i]) elif isinstance(_g_pt[i], numpy.ndarray): --- ./theano/sandbox/scan_module/tests/test_scan.py (original) +++ ./theano/sandbox/scan_module/tests/test_scan.py (refactored) @@ -5,7 +5,7 @@ import sys import unittest -import cPickle +import pickle import numpy from numpy.testing import dec @@ -17,7 +17,7 @@ from theano.tests import unittest_tools as utt from numpy.testing.noseclasses import KnownFailureTest -from test_utils import * +from .test_utils import * import theano.sandbox.scan_module as scan_module from theano.sandbox.scan_module.scan_op import ScanOp @@ -60,7 +60,7 @@ rng = numpy.random.RandomState(utt.fetch_seed()) n_ins = len(inputs_info) - inputs = [tensor.matrix('u%d' % k) for k in xrange(n_ins)] + inputs = [tensor.matrix('u%d' % k) for k in range(n_ins)] scan_inputs = [] for inp, info in zip(inputs, inputs_info): scan_inputs.append(dict(input=inp, taps=[x['tap'] for x in @@ -79,11 +79,11 @@ scan_states.append( dict(initial=state, taps=[x['tap'] for x in info])) n_parameters = len(parameters_info) - parameters = [tensor.vector('p%d' % k) for k in xrange(n_parameters)] + parameters = [tensor.vector('p%d' % k) for k in range(n_parameters)] original_shared_values = [] shared_vars = [] - for k in xrange(n_shared_updates): + for k in range(n_shared_updates): data = rng.uniform(size=(4,)).astype(theano.config.floatX) original_shared_values.append(data) shared_vars.append(theano.shared(data, name='z%d' % k)) @@ -131,13 +131,13 @@ else: rval.append(arg + to_add) states_out = rval - pure_outs = [to_add ** 2 for x in xrange(n_outputs)] + pure_outs = [to_add ** 2 for x in range(n_outputs)] else: shared_outs = [sh 
* 5 for sh in shared_vars] states_out = [x for x in states_out] - pure_outs = [2 for x in xrange(n_outputs)] - return states_out + pure_outs, dict(zip(shared_vars, - shared_outs)) + pure_outs = [2 for x in range(n_outputs)] + return states_out + pure_outs, dict(list(zip(shared_vars, + shared_outs))) def execute_inner_graph(*args): """ @@ -186,10 +186,10 @@ parameters_vals = args[2 + n_ins + n_states:] out_mem_buffers = [numpy.zeros((nsteps, 4)) for k in - xrange(n_outputs)] + range(n_outputs)] shared_values = [x.copy() for x in original_shared_values] - for step in xrange(nsteps): + for step in range(nsteps): arg_pos = 0 to_add = None for in_info in inputs_info: @@ -250,10 +250,10 @@ all_nodes = my_f.maker.fgraph.toposort() assert len([x for x in all_nodes if isinstance(x.op, ScanOp)]) == 0 - print >>sys.stderr, ' n_steps', n_steps - print >>sys.stderr, ' go_backwards', go_backwards - - print >>sys.stderr, ' Scenario 1. Correct shape' + print(' n_steps', n_steps, file=sys.stderr) + print(' go_backwards', go_backwards, file=sys.stderr) + + print(' Scenario 1. Correct shape', file=sys.stderr) if n_steps is not None: _n_steps = n_steps else: @@ -281,9 +281,9 @@ data = numpy.arange(4) state_values.append(data) param_values = [rng.uniform(size=(4,)) for k in - xrange(n_parameters)] + range(n_parameters)] param_values = [numpy.arange(4) for k in - xrange(n_parameters)] + range(n_parameters)] for var, val in zip(shared_vars, original_shared_values): var.set_value(val) theano_outs = my_f(*(input_values + state_values + @@ -309,7 +309,7 @@ #import ipdb; ipdb.set_trace() raise # Scenario 2 : Loose fit (sequences longer then required) - print >>sys.stderr, ' Scenario 2. Loose shapes' + print(' Scenario 2. Loose shapes', file=sys.stderr) input_values = [] for pos, info in enumerate(inputs_info): taps = [x['tap'] for x in info] @@ -336,7 +336,7 @@ data = rng.uniform(size=(4,)) state_values.append(data) param_values = [rng.uniform(size=(4,)) for k in - xrange(n_parameters)] + range(n_parameters)] for var, val in zip(shared_vars, original_shared_values): var.set_value(val) theano_outs = my_f(*(input_values + state_values + @@ -355,7 +355,7 @@ for th_out, num_out in zip(shared_vars, numpy_shared): assert numpy.allclose(th_out.get_value(), num_out) # Scenario 3 : Less data then required - print >>sys.stderr, ' Scenario 2. Wrong shapes' + print(' Scenario 2. 
Wrong shapes', file=sys.stderr) input_values = [] for pos, info in enumerate(inputs_info): taps = [x['tap'] for x in info] @@ -373,7 +373,7 @@ data = rng.uniform(size=(offset - 1, 4)) state_values.append(data) param_values = [rng.uniform(size=(4,)) for k in - xrange(n_parameters)] + range(n_parameters)] for var, val in zip(shared_vars, original_shared_values): var.set_value(val) self.assertRaises(Exception, my_f, @@ -405,9 +405,9 @@ test_nb = 0 for n_ins in [1, 2]: # Randomly pick up 4*n_ins combinations of arguments - for k in xrange(4 * n_ins): + for k in range(4 * n_ins): inp = [] - for inp_nb in xrange(n_ins): + for inp_nb in range(n_ins): pos = rng.randint(len(possible_taps_use_pairs)) inp.append(possible_taps_use_pairs[pos]) @@ -429,9 +429,9 @@ dict(tap=-2, use=True)]] for n_ins in [1, 2]: # Randomly pick up 4*n_ins combinations of arguments - for k in xrange(4 * n_ins): + for k in range(4 * n_ins): state = [] - for state_nb in xrange(n_ins): + for state_nb in range(n_ins): pos = rng.randint(len(possible_taps_use_pairs)) state.append(possible_taps_use_pairs[pos]) all_states_info.append(state) @@ -447,18 +447,18 @@ return for n_outputs in [0, 1, 2]: for n_shared_updates in [0, 1, 2]: - for n_random_combinations in xrange(1): + for n_random_combinations in range(1): pos_inp = rng.randint(len(all_inputs_info)) pos_st = rng.randint(len(all_states_info)) pos_param = rng.randint(len(all_parameters_info)) - print >>sys.stderr - print >>sys.stderr, 'Test nb', test_nb - print >>sys.stderr, ' inputs', all_inputs_info[pos_inp] - print >>sys.stderr, ' states', all_states_info[pos_st] - print >>sys.stderr, ' parameters', \ - all_parameters_info[pos_param] - print >>sys.stderr, ' n_outputs', n_outputs - print >>sys.stderr, ' n_shared_updates', n_shared_updates + print(file=sys.stderr) + print('Test nb', test_nb, file=sys.stderr) + print(' inputs', all_inputs_info[pos_inp], file=sys.stderr) + print(' states', all_states_info[pos_st], file=sys.stderr) + print(' parameters', \ + all_parameters_info[pos_param], file=sys.stderr) + print(' n_outputs', n_outputs, file=sys.stderr) + print(' n_shared_updates', n_shared_updates, file=sys.stderr) test_nb += 1 self.new_run(inputs_info=all_inputs_info[pos_inp], states_info=all_states_info[pos_st], @@ -497,7 +497,7 @@ rng = numpy.random.RandomState(utt.fetch_seed()) state = rng.uniform() numpy_values = numpy.array([state * (2 ** (k + 1)) for k - in xrange(abs(n_steps))]) + in range(abs(n_steps))]) theano_values = my_f(state) assert numpy.aRefactoringTool: Refactored ./theano/sandbox/scan_module/scan_utils.py RefactoringTool: Refactored ./theano/sandbox/scan_module/scan_op.py llclose(numpy_values, theano_values) @@ -553,7 +553,7 @@ v_out = numpy.zeros((8,)) v_out[0] = _v_u[0] * W_in + v_x0 * W - for step in xrange(1, steps): + for step in range(1, steps): v_out[step] = _v_u[step] * W_in + v_out[step - 1] * W v_out = v_out[:steps] theano_values = my_f(v_u, v_x0, W_in, W) --- ./theano/sandbox/scan_module/scan_utils.py (original) +++ ./theano/sandbox/scan_module/scan_utils.py (refactored) @@ -15,7 +15,7 @@ import copy import logging -from itertools import izip + import numpy @@ -45,7 +45,7 @@ # Corner case that I might use in an optimization if size == 0: return tensor_var - shapes = [tensor_var.shape[x] for x in xrange(tensor_var.ndim)] + shapes = [tensor_var.shape[x] for x in range(tensor_var.ndim)] zeros_shape = [size + shapes[0]] + shapes[1:] empty = tensor.zeros(zeros_shape, dtype=tensor_var.dtype) @@ -307,7 +307,7 @@ # inside. 
We don't use the full ShapeFeature interface, but we # let it initialize itself with an empty fgraph, otherwise we will # need to do it manually - for inp, inp_shp in izip(inputs, input_shapes): + for inp, inp_shp in zip(inputs, input_shapes): if inp_shp is not None and len(inp_shp) != inp.ndim: assert len(inp_shp) == inp.ndim @@ -315,7 +315,7 @@ shape_feature.on_attach(theano.gof.FunctionGraph([], [])) # Initialize shape_of with the input shapes - for inp, inp_shp in izip(inputs, input_shapes): + for inp, inp_shp in zip(inputs, input_shapes): shape_feature.set_shape(inp, inp_shp) def local_traverse(out): @@ -367,7 +367,7 @@ inputs = gof.graph.inputs([y]) ins_shapes = [] for inp in inputs: - in_shape = [inp.shape[k] for k in xrange(inp.ndim)] + in_shape = [inp.shape[k] for k in range(inp.ndim)] ins_shapes.append(in_shape) shape = infer_shape([y], inputs, ins_shapes)[0] return tensor.zeros([T, ] + shape, dtype=y.dtype) @@ -412,7 +412,7 @@ pos = index % membuffer.shape[0] if outputs[0] is membuffer: membuffer = membuffer.copy() - print pos + print(pos) out[0][:membuffer.shape[0] - pos] = membuffer[pos:] out[0][membuffer.shape[0] - pos:] = membuffer[:pos] --- ./theano/sandbox/scan_module/scan_op.py (original) +++ ./theano/sandbox/scan_module/scan_op.py (refactored) @@ -13,7 +13,7 @@ __contact__ = "Razvan Pascanu " import logging -from itertools import izip + import numpy @@ -56,7 +56,7 @@ self.hash_inner_graph = options['hash_inner_graph'] # --Construct the destroy map-- if self.inplace: - for idx in xrange(len(outputs)): + for idx in range(len(outputs)): self.destroy_map[idx] = [idx + 1] # --Decide on the default mode-- mode_instance = compile.mode.get_mode(self.mode) @@ -100,18 +100,18 @@ for arg in diff_args: if len(getattr(self, arg)) != len(getattr(other, arg)): return False - for x, y in izip(self.inputs, other.inputs): + for x, y in zip(self.inputs, other.inputs): if x.type != y.type: return False - for x, y in izip(self.lengths, other.lengths): + for x, y in zip(self.lengths, other.lengths): if x.type != y.type: return False s_ins = [self.index] + self.inputs + self.lengths + self.switches o_ins = [other.index] + other.inputs + other.lengths + other.switches - givens = dict(izip(s_ins, o_ins)) + givens = dict(zip(s_ins, o_ins)) # This part might be slow - for x, y in izip(self.outputs, other.outputs): + for x, y in zip(self.outputs, other.outputs): if not gof.graph.is_same_graph(x, y, givens=givens): return False return True @@ -134,7 +134,7 @@ def __hash__(self): rval = hash(type(self)) ^ self.hash_inner_graph - for val in self.options.values(): + for val in list(self.options.values()): if isinstance(val, (list, tuple)): for el in val: rval = rval ^ el @@ -143,12 +143,12 @@ return rval def infer_shape(self, node, input_shapes): - for inp, inp_shp in izip(node.inputs, input_shapes): + for inp, inp_shp in zip(node.inputs, input_shapes): assert inp_shp is None or len(inp_shp) == inp.type.ndim n_outs = len(self.outputs) if self.as_repeatUntil is not None: return [(Shape_i(0)(o),) + x[1:] for o, x - in izip(node.outputs, input_shapes[1: n_outs + 1])] + in zip(node.outputs, input_shapes[1: n_outs + 1])] else: return input_shapes[1: n_outs + 1] @@ -191,7 +191,7 @@ def fake_shared(var): val = 0 - for dim in xrange(var.ndim): + for dim in range(var.ndim): val = [val] val = numpy.asarray(val, dtype=var.dtype) return theano.shared(val, name=var.name) @@ -199,7 +199,7 @@ non_tensor_args = [] non_tensor_buffers = [] aux_buffers = [] - for mem_buf, var in izip(aux_membuffers, aux_inputs): + for 
mem_buf, var in zip(aux_membuffers, aux_inputs): if mem_buf[0] is not None: givens[var] = theano.shared(mem_buf[0], name=var.name, borrow=True) @@ -215,7 +215,7 @@ updates = {} state_buffers = [] n_numeric_values = len(self.lengths) - for pos in xrange(n_numeric_values): + for pos in range(n_numeric_values): var = base_inputs[pos] mem_buf = base_buffers[pos] expr = self.outputs[pos] @@ -274,19 +274,19 @@ while cont and pos < node_input_storage[0][0]: extra_args = [x[0] for x in non_numeric_states_bufs] rvals = self.fn(*(fix_args + extra_args)) - for buf, rval in izip(non_numeric_states_bufs, rvals): + for buf, rval in zip(non_numeric_states_bufs, rvals): buf[0] = rval cont = rvals[-1] pos = pos + 1 # We need to trim the outputs if they are longer - for pos in xrange(n_numeric_values): + for pos in range(n_numeric_values): buf = state_buffers[pos][2][0] mintap = self.mintaps[pos] if buf.shape[0] > pos + self.mintaps[pos]: node_output_storage[pos][0] = buf[:pos + mintap] else: node_output_storage[pos][0] = buf - for out_buf, in_buf in izip( + for out_buf, in_buf in zip( node_output_storage[n_numeric_values:], non_numeric_states_bufs): out_buf[0] = in_buf[0] @@ -315,16 +315,16 @@ self.index.set_value(numpy.int64(0)) # grab fixed arguments fix_args = [x[0] for x in non_tensor_buffers] - for dx in xrange(node_input_storage[0][0]): + for dx in range(node_input_storage[0][0]): extra_args = [x[0] for x in non_numeric_states_bufs] rvals = self.fn(*(fix_args + extra_args)) - for buf, rval in izip(non_numeric_states_bufs, rvals): + for buf, rval in zip(non_numeric_states_bufs, rvals): buf[0] = rval - for pos in xrange(n_numeric_values): + for pos in range(n_numeric_values): buf = state_buffers[pos][0].get_value(borrow=True) mintap = self.mintaps[pos] node_output_storage[pos][0] = buf - RefactoringTool: Refactored ./theano/sandbox/scan_module/scan.py RefactoringTool: Refactored ./theano/sandbox/scan_module/__init__.py for out_buf, in_buf in izip( + for out_buf, in_buf in zip( node_output_storage[n_numeric_values:], non_numeric_states_bufs): out_buf[0] = in_buf[0] @@ -354,16 +354,16 @@ other_time): # Scan overhead profile if any([isinstance(node.op, Scan) and v > 0 for (_, node), v in - apply_time.items()]): - print - print 'Scan overhead:' + list(apply_time.items())]): + print() + print('Scan overhead:') print (' ') total_super_scan_time = 0 total_scan_fct_time = 0 total_scan_op_time = 0 - for (_, node), v in apply_time.items(): + for (_, node), v in list(apply_time.items()): if isinstance(node.op, Scan): if v > 0: scan_fct_time = node.op.mode_instance.fn_time @@ -371,13 +371,13 @@ total_super_scan_time += v total_scan_fct_time += scan_fct_time total_scan_op_time += scan_op_time - print ' %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%' % ( + print(' %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%' % ( v, scan_fct_time, scan_op_time, - scan_fct_time / v * 100, scan_op_time / v * 100), node + scan_fct_time / v * 100, scan_op_time / v * 100), node) else: - print (' The node took 0s, so we can not compute the ' - 'overhead'), node - print ' total %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%' % ( + print((' The node took 0s, so we can not compute the ' + 'overhead'), node) + print(' total %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%' % ( total_super_scan_time, total_scan_fct_time, total_scan_op_time, total_scan_fct_time / total_super_scan_time * 100, - total_scan_op_time / total_super_scan_time * 100) + total_scan_op_time / total_super_scan_time * 100)) --- ./theano/sandbox/scan_module/scan.py (original) +++ 
./theano/sandbox/scan_module/scan.py (refactored) @@ -42,7 +42,7 @@ __contact__ = "Razvan Pascanu " -from itertools import izip + import logging import numpy @@ -53,8 +53,8 @@ from theano.scalar.sharedvar import shared as scalar_shared from theano.compile.pfunc import rebuild_collect_shared -import scan_op -import scan_utils +from . import scan_op +from . import scan_utils # Logging function for sending warning or info _logger = logging.getLogger('theano.scan_module.scan') @@ -519,17 +519,17 @@ scan_outputs_update_rules = scan_utils.to_list(local_op(*scan_inputs)) # 5.5 Collect outputs and add permutation object scan_outputs = [] - for pos in xrange(len(states_and_outputs)): + for pos in range(len(states_and_outputs)): out = scan_utils.ScanPermutation(mintaps[pos])( scan_outputs_update_rules[pos], t) scan_outputs.append(out[mintaps[pos]:]) # 5.6 Construct updates dictionary update_rules = scan_outputs_update_rules[len(states_and_outputs):] updates = {} - for v, u in izip(original_numeric_shared_variables, + for v, u in zip(original_numeric_shared_variables, update_rules[:len(additional_input_states)]): updates[v] = u[-1] - for v, u in izip(original_non_numeric_shared_variables, + for v, u in zip(original_non_numeric_shared_variables, update_rules[len(additional_input_states):]): updates[v] = u # Step 5.7 We are done and can return everything back to the user --- ./theano/sandbox/scan_module/__init__.py (original) +++ ./theano/sandbox/scan_module/__init__.py (refactored) @@ -38,4 +38,4 @@ __copyright__ = "(c) 2010, Universite de Montreal" __contact__ = "Razvan Pascanu " -from scan import scan +from .scan import scan RefactoringTool: Refactored ./theano/sandbox/scan.py --- ./theano/sandbox/scan.py (original) +++
@@ -362,9 +362,9 @@ max_mit_sot = numpy.max([-1] + mit_sot_rightOrder) + 1 max_sit_sot = numpy.max([-1] + sit_sot_rightOrder) + 1 n_elems = numpy.max([max_mit_sot, max_sit_sot]) - _ordered_args = [[] for x in xrange(n_elems)] + _ordered_args = [[] for x in range(n_elems)] offset = 0 - for idx in xrange(n_mit_sot): + for idx in range(n_mit_sot): n_inputs = len(mit_sot_tap_array[idx]) if n_fixed_steps == 1: _ordered_args[mit_sot_rightOrder[idx]] = \ @@ -374,7 +374,7 @@ mit_sot_inner_inputs[offset:offset + n_inputs] offset += n_inputs - for idx in xrange(n_sit_sot): + for idx in range(n_sit_sot): if n_fixed_steps == 1: _ordered_args[sit_sot_rightOrder[idx]] = \ [sit_sot_inner_slices[idx]] @@ -453,16 +453,15 @@ # extract still missing inputs (there still might be so) and add them # as non sequences at the end of our args fake_nonseqs = [x.type() for x in non_seqs] - fake_outputs = scan_utils.clone(outputs + updates.values(), - replace=dict(zip(non_seqs, - fake_nonseqs))) - all_inputs = itertools.ifilter( + fake_outputs = scan_utils.clone(outputs + list(updates.values()), + replace=dict(list(zip(non_seqs, + fake_nonseqs)))) + all_inputs = filter( lambda x: (isinstance(x, gof.Variable) and not isinstance(x, SharedVariable) and not isinstance(x, gof.Constant)), gof.graph.inputs(fake_outputs)) - extra_inputs = filter(lambda x: x not in args + fake_nonseqs, - all_inputs) + extra_inputs = [x for x in all_inputs if x not in args + fake_nonseqs] non_seqs += extra_inputs ## Note we do not use all_inputs directly since the order of variables ## in args is quite important @@ -508,7 +507,7 @@ n_outs = len(dummy_f.maker.outputs) if as_while: RefactoringTool: Refactored ./theano/sandbox/rng_mrg.py RefactoringTool: No changes to ./theano/sandbox/neighbours.py RefactoringTool: Refactored ./theano/sandbox/neighbourhoods.py n_outs = n_outs - 1 - outs_info = [dict(steps=n_steps) for x in xrange(n_outs)] + outs_info = [dict(steps=n_steps) for x in range(n_outs)] ## Step 5.1 Outputs with taps different then -1 @@ -565,7 +564,7 @@ if (not isinstance(arg, SharedVariable) and not isinstance(arg, tensor.Constant))] - givens.update(dict(zip(other_scan_args, other_inner_args))) + givens.update(dict(list(zip(other_scan_args, other_inner_args)))) other_shared_scan_args = [arg.variable for arg in dummy_f.maker.expanded_inputs if (isinstance(arg.variable, SharedVariable) and @@ -574,8 +573,8 @@ in dummy_f.maker.expanded_inputs if (isinstance(arg.variable, SharedVariable) and not arg.update)] - givens.update(dict(zip(other_shared_scan_args, - other_shared_inner_args))) + givens.update(dict(list(zip(other_shared_scan_args, + other_shared_inner_args)))) ## ### Step 6. Re-order the outputs and clone them replacing things @@ -597,7 +596,7 @@ if condition is not None: inner_outs.append(condition) new_givens = OrderedDict() - for w, w_copy in givens.iteritems(): + for w, w_copy in givens.items(): new_givens[w] = w.type.filter_variable(w_copy) new_outs = scan_utils.clone(inner_outs, replace=new_givens) @@ -606,7 +605,7 @@ ### Step 7. 
Create the Scan Op ## - tap_array = mit_sot_tap_array + [[-1] for x in xrange(n_sit_sot)] + tap_array = mit_sot_tap_array + [[-1] for x in range(n_sit_sot)] info = OrderedDict() info['tap_array'] = tap_array @@ -662,7 +661,7 @@ mit_sot_outs = scan_outs[offset:offset + n_mit_sot] offset += n_mit_sot - offsets = [1 for x in xrange(n_sit_sot)] + offsets = [1 for x in range(n_sit_sot)] sit_sot_outs = scan_outs[offset:offset + n_sit_sot] offset += n_sit_sot --- ./theano/sandbox/rng_mrg.py (original) +++ ./theano/sandbox/rng_mrg.py (refactored) @@ -18,7 +18,7 @@ from theano.gof import local_optimizer from theano.gof.python25 import all, any -import multinomial +from . import multinomial from theano.sandbox.cuda import cuda_available, cuda_enabled, GpuOp if cuda_available: @@ -29,8 +29,8 @@ def matVecModM(A, s, m): # return (A * s) % m x = numpy.zeros_like(s) - for i in xrange(len(x)): - for j in xrange(len(s)): + for i in range(len(x)): + for j in range(len(s)): r = numpy.int32((numpy.int64(A[i][j]) * s[j] + x[i]) % m) if r >= 0: x[i] = r @@ -197,7 +197,7 @@ o_rstate, o_sample = out numpy_version = numpy.__version__.split('.') if not self.warned_numpy_version and int(numpy_version[0]) <= 1 and int(numpy_version[1]) <3 : - print "Warning: you must use numpy version 1.3.0 or higher with the python version of this op. Otherwise numpy leak memory. and numpy" + print("Warning: you must use numpy version 1.3.0 or higher with the python version of this op. Otherwise numpy leak memory. and numpy") self.warned_numpy_version = True n_elements = 1 @@ -215,7 +215,7 @@ err_orig = numpy.seterr(over='ignore') try: - for i in xrange(n_elements): + for i in range(n_elements): sample = mrg_next_value(rstate[i % n_streams], rstate[i % n_streams]) rval[i] = sample @@ -702,7 +702,7 @@ assert n_streams > 0 rval = numpy.zeros((n_streams, 6), dtype='int32') rval[0] = self.rstate - for i in xrange(1, n_streams): + for i in range(1, n_streams): rval[i] = ff_2p72(rval[i - 1]) if inc_rstate: self.inc_rstate() --- ./theano/sandbox/neighbourhoods.py (original) +++ ./theano/sandbox/neighbourhoods.py (refactored) @@ -12,7 +12,7 @@ RefactoringTool: No changes to ./theano/sandbox/multinomial.py RefactoringTool: Refactored ./theano/sandbox/minimal.py RefactoringTool: Refactored ./theano/sandbox/linalg/tests/test_linalg.py from theano import gof, Op, tensor, Variable, Apply import numpy -import __builtin__ +import builtins class NeighbourhoodsFromImages(Op): def __init__(self, n_dims_before, dims_neighbourhoods, @@ -84,9 +84,9 @@ self.code_string, self.code = self.make_py_code() def _compute_neigh_strides(self): - neigh_strides = [1 for i in xrange(len(self.strides))] + neigh_strides = [1 for i in range(len(self.strides))] cur_stride = 1 - for i in xrange(len(self.strides)-1, -1, -1): + for i in range(len(self.strides)-1, -1, -1): neigh_strides[i] = cur_stride cur_stride *= self.dims_neighbourhoods[i] return neigh_strides @@ -115,7 +115,7 @@ def out_shape(self, input_shape): dims = list(input_shape[:self.n_dims_before]) - num_strides = [0 for i in xrange(len(self.strides))] + num_strides = [0 for i in range(len(self.strides))] neigh_flattened_dim = 1 for i, ds in enumerate(self.dims_neighbourhoods): cur_stride = self.strides[i] @@ -212,14 +212,14 @@ def make_py_code(self): code = self._py_outerloops() - for i in xrange(len(self.strides)): + for i in range(len(self.strides)): code += self._py_innerloop(i) code += self._py_assignment() - return code, __builtin__.compile(code, '', 'exec') + return code, builtins.compile(code, '', 
'exec') def _py_outerloops(self): code_before = "" - for dim_idx in xrange(self.n_dims_before): + for dim_idx in range(self.n_dims_before): code_before += ('\t' * (dim_idx)) + \ "for outer_idx_%d in xrange(input_shape[%d]):\n" % \ (dim_idx, dim_idx) @@ -247,13 +247,13 @@ def _py_flattened_idx(self): return "+".join(["neigh_strides[%d]*neigh_idx_%d" % (i,i) \ - for i in xrange(len(self.strides))]) + for i in range(len(self.strides))]) def _py_assignment(self): input_idx = "".join(["outer_idx_%d," % (i,) \ - for i in xrange(self.n_dims_before)]) + for i in range(self.n_dims_before)]) input_idx += "".join(["dim_%d_offset+neigh_idx_%d," % \ - (i,i) for i in xrange(len(self.strides))]) + (i,i) for i in range(len(self.strides))]) out_idx = "".join(\ ["outer_idx_%d," % (i,) for i in \ range(self.n_dims_before)] + \ --- ./theano/sandbox/minimal.py (original) +++ ./theano/sandbox/minimal.py (refactored) @@ -34,9 +34,9 @@ # do what you want here, # but do not modify any of the arguments [inplace]. - print "perform got %i arguments" % len(inputs) + print("perform got %i arguments" % len(inputs)) - print "Max of input[0] is ", numpy.max(inputs[0]) + print("Max of input[0] is ", numpy.max(inputs[0])) # return some computed value. # do not return something that is aliased to one of the inputs. @@ -57,11 +57,11 @@ A=tensor.matrix() b=tensor.vector() - print 'building function' + print('building function') f = function([A, b], minimal(A, A, b, b, A)) - print 'built' + print('built') Aval=self.rng.randn(5,5) - bval=numpy.array(range(5),dtype=float) + bval=numpy.array(list(range(5)),dtype=float) f(Aval,bval) - print 'done' + print('done') --- ./theano/sandbox/linalg/tests/test_linalg.py (original) +++ ./theano/sandbox/linalg/tests/test_linalg.py (refactored) @@ -158,7 +158,7 @@ n = rng.randint(4) + 2 rs = [] xs = [] - for k in xrange(n): + for k in range(n): rs += [rng.randn(4, 4).astype(theano.config.floatX)] xs += [tensor.matrix()] sol = matrix_dot(*xs) @@ -531,7 +531,7 @@ def test_eval(self): A = theano.tensor.matrix(dtype=self.dtype) - self.assertEquals([e.eval({A: [[1]]}) for RefactoringTool: No changes to ./theano/sandbox/linalg/tests/test_kron.py RefactoringTool: Refactored ./theano/sandbox/linalg/ops.py e in self.op(A)], + self.assertEqual([e.eval({A: [[1]]}) for e in self.op(A)], [[1.0], [[1.0]]]) x = [[0, 1], [1, 0]] w, v = [e.eval({A: x}) for e in self.op(A)] --- ./theano/sandbox/linalg/ops.py (original) +++ ./theano/sandbox/linalg/ops.py (refactored) @@ -153,7 +153,7 @@ def update_second_from_first(self, r0, r1): old_hints = self.hints[r0] new_hints = self.hints[r1] - for k, v in old_hints.items(): + for k, v in list(old_hints.items()): if k in new_hints and new_hints[k] is not v: raise NotImplementedError() if k not in new_hints: @@ -454,24 +454,24 @@ N = x.shape[0] if self.lower: F = numpy.tril(dz) - for k in xrange(N - 1, -1, -1): - for j in xrange(k + 1, N): - for i in xrange(j, N): + for k in range(N - 1, -1, -1): + for j in range(k + 1, N): + for i in range(j, N): F[i, k] -= F[i, j] * L[j, k] F[j, k] -= F[i, j] * L[i, k] - for j in xrange(k + 1, N): + for j in range(k + 1, N): F[j, k] /= L[k, k] F[k, k] -= L[j, k] * F[j, k] F[k, k] /= (2 * L[k, k]) else: F = numpy.triu(dz) M = N - 1 - for k in xrange(N - 1, -1, -1): - for j in xrange(k + 1, N): - for i in xrange(j, N): + for k in range(N - 1, -1, -1): + for j in range(k + 1, N): + for i in range(j, N): F[k, i] -= F[j, i] * L[k, j] F[k, j] -= F[j, i] * L[k, i] - for j in xrange(k + 1, N): + for j in range(k + 1, N): F[k, j] /= L[k, k] F[k, 
k] -= L[k, j] * F[k, j] F[k, k] /= (2 * L[k, k]) @@ -518,7 +518,9 @@ assert x.ndim == 2 return Apply(self, [x], [x.type()]) - def perform(self, node, (x,), (z, )): + def perform(self, node, xxx_todo_changeme, xxx_todo_changeme1): + (x,) = xxx_todo_changeme + (z, ) = xxx_todo_changeme1 try: if imported_scipy: z[0] = scipy.linalg.pinv(x).astype(x.dtype) @@ -567,7 +569,9 @@ assert x.ndim == 2 return Apply(self, [x], [x.type()]) - def perform(self, node, (x,), (z, )): + def perform(self, node, xxx_todo_changeme2, xxx_todo_changeme3): + (x,) = xxx_todo_changeme2 + (z, ) = xxx_todo_changeme3 try: z[0] = numpy.linalg.inv(x).astype(x.dtype) except numpy.linalg.LinAlgError: @@ -769,7 +773,9 @@ def grad(self, inputs, g_outputs): return [extract_diag(g_outputs[0])] - def perform(self, node, (x,), (z,)): + def perform(self, node, xxx_todo_changeme4, xxx_todo_changeme5): + (x,) = xxx_todo_changeme4 + (z,) = xxx_todo_changeme5 if x.ndim != 1: raise TypeError(x) z[0] = numpy.diag(x) @@ -808,11 +814,13 @@ o = theano.tensor.scalar(dtype=x.dtype) return Apply(self, [x], [o]) - def perform(self, node, (x,), (z, )): + def perform(self, node, xxx_todo_changeme6, xxx_todo_changeme7): + (x,) = xxx_todo_changeme6 + (z, ) = xxx_todo_changeme7 try: z[0] = numpy.asarray(numpy.linalg.det(x), dtype=x.dtype) except Exception: - print 'Failed to compute determinant', x + print('Failed to compute determinant', x) raise def grad(self, inputs, g_outputs): @@ -858,7 +866,7 @@ raise ValueError('spectral_radius_bound requires a strictly positive ' 'exponent', log2_exponent) XX = X - for i in xrange(log2_exponent): + for i in range(log2_exponent): XX = tensor.dot(XX, XX) return tensor.pow( trace(XX), @@ -927,7 +935,9 @@ RefactoringTool: Refactored ./theano/sandbox/linalg/kron.py RefactoringTool: Refactored ./theano/sandbox/linalg/__init__.py RefactoringTool: Refactored ./theano/sandbox/gpuarray/type.py RefactoringTool: No changes to ./theano/sandbox/gpuarray/tests/test_type.py RefactoringTool: No changes to ./theano/sandbox/gpuarray/tests/test_subtensor.py RefactoringTool: No changes to ./theano/sandbox/gpuarray/tests/test_opt.py RefactoringTool: No changes to ./theano/sandbox/gpuarray/tests/test_elemwise.py RefactoringTool: No changes to ./theano/sandbox/gpuarray/tests/test_blas.py RefactoringTool: Refactored ./theano/sandbox/gpuarray/tests/test_basic_ops.py v = theano.tensor.matrix(dtype=x.dtype) return Apply(self, [x], [w, v]) - def perform(self, node, (x,), (w, v)): + def perform(self, node, xxx_todo_changeme8, xxx_todo_changeme9): + (x,) = xxx_todo_changeme8 + (w, v) = xxx_todo_changeme9 try: w[0], v[0] = [z.astype(x.dtype) for z in self._numop(x)] except numpy.linalg.LinAlgError: @@ -985,7 +995,9 @@ v = theano.tensor.matrix(dtype=x.dtype) return Apply(self, [x], [w, v]) - def perform(self, node, (x,), (w, v)): + def perform(self, node, xxx_todo_changeme10, xxx_todo_changeme11): + (x,) = xxx_todo_changeme10 + (w, v) = xxx_todo_changeme11 try: w[0], v[0] = self._numop(x, self.UPLO) except numpy.linalg.LinAlgError: @@ -1054,7 +1066,7 @@ return 'EighGrad{%s}' % self.UPLO def make_node(self, x, w, v, gw, gv): - x, w, v, gw, gv = map(as_tensor_variable, (x, w, v, gw, gv)) + x, w, v, gw, gv = list(map(as_tensor_variable, (x, w, v, gw, gv))) assert x.ndim == 2 assert w.ndim == 1 assert v.ndim == 2 @@ -1075,9 +1087,9 @@ outer = numpy.outer G = lambda n: sum(v[:, m] * V.T[n].dot(v[:, m]) / (w[n] - w[m]) - for m in xrange(N) if m != n) + for m in range(N) if m != n) g = sum(outer(v[:, n], v[:, n] * W[n] + G(n)) - for n in 
xrange(N)) + for n in range(N)) # Numpy's eigh(a, 'L') (eigh(a, 'U')) is a function of tril(a) # (triu(a)) only. This means that partial derivative of --- ./theano/sandbox/linalg/kron.py (original) +++ ./theano/sandbox/linalg/kron.py (refactored) @@ -23,7 +23,7 @@ o = tensor.outer(a, b) o = o.reshape(tensor.concatenate((a.shape, b.shape)), a.ndim + b.ndim) - shf = o.dimshuffle(0, 2, 1, * range(3, o.ndim)) + shf = o.dimshuffle(0, 2, 1, * list(range(3, o.ndim))) if shf.ndim == 3: shf = o.dimshuffle(1, 0, 2) o = shf.flatten() --- ./theano/sandbox/linalg/__init__.py (original) +++ ./theano/sandbox/linalg/__init__.py (refactored) @@ -1,6 +1,6 @@ -from kron import kron -from ops import (cholesky, matrix_inverse, solve, +from .kron import kron +from .ops import (cholesky, matrix_inverse, solve, diag, extract_diag, alloc_diag, det, psd, eig, eigh, trace, spectral_radius_bound) --- ./theano/sandbox/gpuarray/type.py (original) +++ ./theano/sandbox/gpuarray/type.py (refactored) @@ -206,7 +206,7 @@ class _operators(_tensor_py_operators): def _as_TensorVariable(self): - from basic_ops import host_from_gpu + from .basic_ops import host_from_gpu return host_from_gpu(self) def _as_GpuArrayVariable(self): --- ./theano/sandbox/gpuarray/tests/test_basic_ops.py (original) +++ ./theano/sandbox/gpuarray/tests/test_basic_ops.py (refactored) @@ -1,5 +1,5 @@ import unittest -from itertools import izip + from copy import copy, deepcopy import numpy @@ -59,7 +59,7 @@ def wrapper(): try: f() - except Exception, e: + except Exception as e: if isinstance(e, EClass): raise KnownFailureTest(msg, e) raise @@ -92,7 +92,7 @@ dtype = kwargs.pop('dtype', theano.config.floatX) cls = kwargs.pop('cls', None) if len(kwargs) != 0: - raise TypeError('Unexpected argument %s', kwargs.keys()[0]) + raise TypeError('Unexpected argument %s', list(kwargs.keys())[0]) return gpuarray.array(r, dtype=dtype, cls=cls) @@ -121,7 +121,7 @@ if skip: raise SkipTest(skip) - for testname, inputs in cases.items(): + for testname, inputs in list(cases.items()): self.run_case(testname, inputs) def run_case(self, testname, inputs): @@ -131,7 +131,7 @@ try: node_ref = safe_make_node(self.op, *inputs_ref) RefactoringTool: Refactored ./theano/sandbox/gpuarray/subtensor.py node_tst = safe_make_node(self.op, *inputs_tst) - except Exception, exc: + except Exception as exc: err_msg = ("Test %s::%s: Error occured while making " "a node with inputs %s") % (self.gpu_op, testname, inputs) @@ -141,7 +141,7 @@ try: f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu) f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu) - except Exception, exc: + except Exception as exc: err_msg = ("Test %s::%s: Error occured while trying to " "make a Function") % (self.gpu_op, testname) exc.args += (err_msg,) @@ -152,12 +152,12 @@ ref_e = None try: expecteds = f_ref() - except Exception, exc: + except Exception as exc: ref_e = exc try: variables = f_tst() - except Exception, exc: + except Exception as exc: if ref_e is None: err_msg = ("Test %s::%s: exception when calling the " "Function") % (self.gpu_op, testname) @@ -177,7 +177,7 @@ raise for i, (variable, expected) in \ - enumerate(izip(variables, expecteds)): + enumerate(zip(variables, expecteds)): if variable.dtype != expected.dtype or \ variable.shape != expected.shape or \ not TensorType.values_eq_approx(variable, @@ -188,7 +188,7 @@ self.op, testname, i, inputs, expected, expected.dtype, variable, variable.dtype)) - for description, check in self.checks.items(): + for description, check in 
list(self.checks.items()): if not check(inputs, variables): self.fail(("Test %s::%s: Failed check: %s " "(inputs were %s, ouputs were %s)") % --- ./theano/sandbox/gpuarray/subtensor.py (original) +++ ./theano/sandbox/gpuarray/subtensor.py (refactored) @@ -1,4 +1,4 @@ -import StringIO +import io import numpy @@ -88,8 +88,8 @@ if (!%(out)s) { %(fail)s } """ % dict(out=outputs[0], inp=inp, fail=sub['fail']) - sio = StringIO.StringIO() - print >> sio, """ + sio = io.StringIO() + print(""" ssize_t starts[%(sz)s]; ssize_t stops[%(sz)s]; ssize_t steps[%(sz)s]; @@ -100,7 +100,7 @@ PyErr_SetString(PyExc_IndexError, "invalid index"); %(fail)s } - """ % dict(sz=len(idx_list), inp=inp, fail=sub['fail']) + """ % dict(sz=len(idx_list), inp=inp, fail=sub['fail']), file=sio) def fix_idx(idx): if idx is None: @@ -117,7 +117,7 @@ start, start_n = fix_idx(idx.start) stop, stop_n = fix_idx(idx.stop) step, step_n = fix_idx(idx.step) - print >>sio, """ + print(""" starts[%(i)s] = %(start)s; stops[%(i)s] = %(stop)s; steps[%(i)s] = %(step)s; @@ -128,7 +128,7 @@ } """ % dict(i=i, start=start, stop=stop, step=step, start_n=start_n, stop_n=stop_n, step_n=step_n, - fail=sub['fail'], inp=inp) + fail=sub['fail'], inp=inp), file=sio) else: if isinstance(idx, gof.Type): start = indices.pop(0) @@ -136,19 +136,19 @@ start = idx else: assert 0, idx - print >>sio, """ + print(""" cur = %(start)s; if (cur < 0) cur += %(inp)s->ga.dimensions[%(i)s]; starts[%(i)s] = RefactoringTool: Refactored ./theano/sandbox/gpuarray/opt.py RefactoringTool: Refactored ./theano/sandbox/gpuarray/elemwise.py RefactoringTool: Refactored ./theano/sandbox/gpuarray/blas.py RefactoringTool: Refactored ./theano/sandbox/gpuarray/basic_ops.py RefactoringTool: Refactored ./theano/sandbox/gpuarray/__init__.py RefactoringTool: Refactored ./theano/sandbox/fourier.py RefactoringTool: Refactored ./theano/sandbox/downsample.py cur; steps[%(i)s] = 0; - """ % dict(i=i, start=start, fail=sub['fail'], inp=inp) + """ % dict(i=i, start=start, fail=sub['fail'], inp=inp), file=sio) - print >>sio, """ + print(""" Py_XDECREF(%(out)s); %(out)s = pygpu_index(%(inp)s, starts, stops, steps); if (!%(out)s) { %(fail)s } -""" % dict(name=name, fail=sub['fail'], inp=inp, out=outputs[0]) +""" % dict(name=name, fail=sub['fail'], inp=inp, out=outputs[0]), file=sio) return sio.getvalue() --- ./theano/sandbox/gpuarray/opt.py (original) +++ ./theano/sandbox/gpuarray/opt.py (refactored) @@ -99,7 +99,7 @@ new_input = host_from_gpu(gpu_from_host(input)) fgraph.replace_validate(input, new_input, "InputToGpuOptimizer") - except TypeError, e: + except TypeError as e: # This could fail if the inputs are not TensorTypes pass --- ./theano/sandbox/gpuarray/elemwise.py (original) +++ ./theano/sandbox/gpuarray/elemwise.py (refactored) @@ -1,5 +1,5 @@ import copy -from itertools import izip + import numpy from theano import Op, Apply, scalar, config @@ -151,7 +151,7 @@ node._cache_elemwise_k = self.generate_kernel(node, "kcode") out_shape = [] - for values in izip(*[input.shape for input in inputs]): + for values in zip(*[input.shape for input in inputs]): if any(v == 0 for v in values): # All non-broadcasted dimensions should be zero assert max(values) <= 1 @@ -161,7 +161,7 @@ out_shape = tuple(out_shape) args = copy.copy(inputs) - for n, (stor, out) in enumerate(izip(output_storage, node.outputs)): + for n, (stor, out) in enumerate(zip(output_storage, node.outputs)): if n in self.inplace_pattern: stor[0] = inputs[self.inplace_pattern[n]] else: --- ./theano/sandbox/gpuarray/blas.py (original) +++ 
./theano/sandbox/gpuarray/blas.py (refactored) @@ -6,7 +6,7 @@ try: import pygpu from pygpu import blas -except ImportError, e: +except ImportError as e: # To make sure theano is importable pass --- ./theano/sandbox/gpuarray/basic_ops.py (original) +++ ./theano/sandbox/gpuarray/basic_ops.py (refactored) @@ -19,7 +19,7 @@ except ImportError: pass -from type import GpuArrayType +from .type import GpuArrayType def as_gpuarray_variable(x): @@ -718,7 +718,7 @@ return [out_shape] def grad(self, inp, grads): - return [grad_undefined(self, i, inp[i]) for i in xrange(3)] + return [grad_undefined(self, i, inp[i]) for i in range(3)] def __eq__(self, other): return type(self) == type(other) and self.dtype == other.dtype --- ./theano/sandbox/gpuarray/__init__.py (original) +++ ./theano/sandbox/gpuarray/__init__.py (refactored) @@ -26,9 +26,9 @@ in_c_key=True) # This is for documentation not to depend on the availability of pygpu -from type import (GpuArrayType, GpuArrayVariable, GpuArrayConstant, +from .type import (GpuArrayType, GpuArrayVariable, GpuArrayConstant, GpuArraySharedVariable, gpuarray_shared_constructor) -import opt +from . import opt def init_dev(dev): --- ./theano/sandbox/fourier.py (original) +++ ./theano/sandbox/fourier.py (refactored) @@ -111,7 +111,7 @@ rval = numpy.zeros((rows, cols)) col_range = numpy.arange(cols) scale = numpy.sqrt(2.0/cols) - for i in xrange(rows): + for i in range(rows): rval[i] = numpy.cos(i * (col_range*2+1)/(2.0 * cols) * numpy.pi) * scale if unitary: --- ./theano/sandbox/downsample.py (original) +++ ./theano/sandbox/downsample.py (refactored) @@ -1,5 +1,5 @@ import sys -print >> sys.stderr, "DEPRECATION: theano.sandbox.downsample is deprecated. Use theano.tensor.signal.downsample instead." +print("DEPRECATION: theano.sandbox.downsample is deprecated. Use theano.tensor.signal.downsample instead.", file=sys.stderr) from theano.tensor.signal.downsample import * RefactoringTool: Refactored ./theano/sandbox/debug.py RefactoringTool: Refactored ./theano/sandbox/cuda/var.py RefactoringTool: Refactored ./theano/sandbox/cuda/type.py --- ./theano/sandbox/debug.py (original) +++ ./theano/sandbox/debug.py (refactored) @@ -66,7 +66,7 @@ for r in node.outputs: try: r.type.filter(r.value, strict = True) - except TypeError, e: + except TypeError as e: exc_type, exc_value, exc_trace = sys.exc_info() exc = DebugException(e, "The output %s was filled with data with the wrong type using linker " \ ("%s. This happened at step %i of the program."
% (r, linker, i)) + \ @@ -79,7 +79,7 @@ exc.node = node exc.thunk = thunk exc.linker = linker - raise DebugException, exc, exc_trace + raise DebugException(exc).with_traceback(exc_trace) def compare_variables(self, i, node, *thunks): thunk0 = thunks[0] @@ -133,7 +133,7 @@ self.store_value(i, node, *thunks) for f in self.debug_post: f(i, node, *thunks) - except Exception, e: + except Exception as e: exc_type, exc_value, exc_trace = sys.exc_info() if isinstance(e, DebugException): raise @@ -144,15 +144,15 @@ exc.step = i exc.node = node exc.thunks = thunks - raise DebugException, exc, exc_trace + raise DebugException(exc).with_traceback(exc_trace) def print_info(i, node, *thunks): - print "step %i, node %s" % (i, node) + print("step %i, node %s" % (i, node)) def print_from(i, node, *thunks): - print "parents:", ", ".join(str(input.step) for input in node.inputs) + print("parents:", ", ".join(str(input.step) for input in node.inputs)) def print_input_shapes(i, node, *thunks): shapes = [] @@ -161,13 +161,13 @@ shapes.append(str(input.value.shape)) else: shapes.append('N/A') - print "input shapes:", ", ".join(shapes) + print("input shapes:", ", ".join(shapes)) def print_input_types(i, node, *thunks): - print "input types:", ", ".join(str(type(input.value)) for input in node.inputs) + print("input types:", ", ".join(str(type(input.value)) for input in node.inputs)) def print_sep(i, node, *thunks): - print "===================================" + print("===================================") import numpy def numpy_compare(a, b, tolerance = 1e-6): --- ./theano/sandbox/cuda/var.py (original) +++ ./theano/sandbox/cuda/var.py (refactored) @@ -158,11 +158,11 @@ if broadcastable is None: broadcastable = (False,) * len(value.shape) type = CudaNdarrayType(broadcastable=broadcastable) - print "trying to return?" + print("trying to return?") try: rval = CudaNdarraySharedVariable(type=type, value=_value, name=name, strict=strict) - except Exception, e: - print "ERROR", e + except Exception as e: + print("ERROR", e) raise return rval @@ -200,8 +200,8 @@ try: rval = CudaNdarraySharedVariable(type=type, value=deviceval, name=name, strict=strict) - except Exception, e: - print "ERROR", e + except Exception as e: + print("ERROR", e) raise rval.get_value_return_ndarray = get_value_return_ndarray --- ./theano/sandbox/cuda/type.py (original) +++ ./theano/sandbox/cuda/type.py (refactored) @@ -1,7 +1,7 @@ """Provide CudaNdarrayType """ import os -import copy_reg +import copyreg import numpy @@ -277,7 +277,7 @@ sio = StringIO() fail = sub['fail'] nd = self.ndim - print >> sio, """ + print(""" assert(py_%(name)s->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. 
@@ -295,10 +295,10 @@ %(fail)s; } //std::cerr << "c_extract " << %(name)s << " nd check passed\\n"; - RefactoringTool: Refactored ./theano/sandbox/cuda/tests/walltime.py RefactoringTool: No changes to ./theano/sandbox/cuda/tests/test_viewop.py RefactoringTool: No changes to ./theano/sandbox/cuda/tests/test_var.py RefactoringTool: No changes to ./theano/sandbox/cuda/tests/test_tensor_op.py RefactoringTool: Refactored ./theano/sandbox/cuda/tests/test_rng_curand.py RefactoringTool: Refactored ./theano/sandbox/cuda/tests/test_opt.py RefactoringTool: No changes to ./theano/sandbox/cuda/tests/test_nvcc_compiler.py RefactoringTool: Refactored ./theano/sandbox/cuda/tests/test_nnet.py """ % locals() + """ % locals(), file=sio) for i, b in enumerate(self.broadcastable): if b: - print >> sio, """ + print(""" if (CudaNdarray_HOST_DIMS(%(name)s)[%(i)s] != 1) { PyErr_Format(PyExc_RuntimeError, @@ -320,8 +320,8 @@ %(fail)s; } //std::cerr << "c_extract " << %(name)s << "bcast check %(i)s passed\\n"; - """ % locals() - print >> sio, """ + """ % locals(), file=sio) + print(""" assert(%(name)s); Py_INCREF(py_%(name)s); } @@ -340,7 +340,7 @@ %(fail)s; } //std::cerr << "c_extract done " << %(name)s << '\\n'; - """ % locals() + """ % locals(), file=sio) #print sio.getvalue() return sio.getvalue() @@ -483,7 +483,7 @@ # the situation. def CudaNdarray_unpickler(npa): return cuda.CudaNdarray(npa) -copy_reg.constructor(CudaNdarray_unpickler) +copyreg.constructor(CudaNdarray_unpickler) def CudaNdarray_pickler(cnda): @@ -491,5 +491,5 @@ # In case cuda is not imported. if cuda is not None: - copy_reg.pickle(cuda.CudaNdarray, CudaNdarray_pickler, + copyreg.pickle(cuda.CudaNdarray, CudaNdarray_pickler, CudaNdarray_unpickler) --- ./theano/sandbox/cuda/tests/walltime.py (original) +++ ./theano/sandbox/cuda/tests/walltime.py (refactored) @@ -8,23 +8,23 @@ def compare_fns(fns, input, reps=10): times = {} - for implname, impl in fns.iteritems(): + for implname, impl in fns.items(): try: - print 'TOPOSORT', implname + print('TOPOSORT', implname) for i, n in enumerate(impl.maker.fgraph.toposort()): - print i, n + print(i, n) except Exception: pass t0 = time.time() - for i in xrange(reps): + for i in range(reps): impl(input) dt = time.time() - t0 times[implname] = dt return times def showtimes(times): - for impl, dt in times.iteritems(): - print impl, dt + for impl, dt in times.items(): + print(impl, dt) def cmp_sigmoids(shape): def numpy_sigmoid(input): --- ./theano/sandbox/cuda/tests/test_rng_curand.py (original) +++ ./theano/sandbox/cuda/tests/test_rng_curand.py (refactored) @@ -177,8 +177,8 @@ for f in mrg_u, crn_u, mrg_n, crn_n: # don't time the first call, it has some startup cost - print 'DEBUGPRINT' - print '----------' + print('DEBUGPRINT') + print('----------') theano.printing.debugprint(f) for i in range(100): --- ./theano/sandbox/cuda/tests/test_opt.py (original) +++ ./theano/sandbox/cuda/tests/test_opt.py (refactored) @@ -12,6 +12,7 @@ from theano.tests import unittest_tools as utt import theano.sandbox.cuda as cuda +from functools import reduce if cuda.cuda_available == False: raise SkipTest('Optional package cuda disabled') @@ -328,7 +329,7 @@ f = pfunc([b, c], [a + b + c], mode=mode_with_gpu) topo = f.maker.fgraph.toposort() for i, node in enumerate(topo): - print >> sys.stdout, i, node + print(i, node, file=sys.stdout) assert len(topo) == 4 assert isinstance(topo[2].op.scalar_op, theano.scalar.basic.Composite) #let debugmode catch errors @@ -364,7 +365,7 @@ f = theano.function([X,Y], Z, mode=mode_with_gpu) 
packed, = f.maker.fgraph.inputs[1].clients client, idx = packed - print client + print(client) assert isinstance(client.op, tensor.Elemwise) assert isinstance(client.op.scalar_op, theano.scalar.Cast) packed ,= client.outputs[0].clients --- ./theano/sandbox/cuda/tests/test_nnet.py (original) +++ ./theano/sandbox/cuda/tests/test_nnet.py (refactored) @@ -146,17 +146,17 @@ scaled_err = numpy.minimum(abs_err / atol, rel_err / rtol) max_i = scaled_err.argmax() - print 'max err index:', max_i, max_i / batch_size, - RefactoringTool: No changes to ./theano/sandbox/cuda/tests/test_neighbours.py RefactoringTool: Refactored ./theano/sandbox/cuda/tests/test_mlp.py print max_i % batch_size, max_i / n_out, max_i & n_out - print 'At that index:' - print 'err:', scaled_err.flatten()[max_i] - print 'absolute error:', abs_err.flatten()[max_i] - print 'relative error:', rel_err.flatten()[max_i] - print 'cpu_out:', cpu_out.flatten()[max_i] - print 'gpu_out:', gpu_out.flatten()[max_i] - print 'softmax_output_value:', softmax_output_value.flatten()[max_i] - print 'dnll_value:', dnll_value[max_i / n_out] - print 'y_idx_value:', y_idx_value[max_i / n_out] + print('max err index:', max_i, max_i / batch_size, end=' ') + print(max_i % batch_size, max_i / n_out, max_i & n_out) + print('At that index:') + print('err:', scaled_err.flatten()[max_i]) + print('absolute error:', abs_err.flatten()[max_i]) + print('relative error:', rel_err.flatten()[max_i]) + print('cpu_out:', cpu_out.flatten()[max_i]) + print('gpu_out:', gpu_out.flatten()[max_i]) + print('softmax_output_value:', softmax_output_value.flatten()[max_i]) + print('dnll_value:', dnll_value[max_i / n_out]) + print('y_idx_value:', y_idx_value[max_i / n_out]) assert False, "numpy.allclose(cpu_out, gpu_out, rtol=%s, atol=%s)" % ( rtol, atol) --- ./theano/sandbox/cuda/tests/test_mlp.py (original) +++ ./theano/sandbox/cuda/tests/test_mlp.py (refactored) @@ -1,7 +1,7 @@ import copy import logging import time -from itertools import izip + from nose.plugins.skip import SkipTest import numpy @@ -59,14 +59,14 @@ def print_mode(mode): - if mode is not None and isinstance(mode, (theano.compile.ProfileMode,)): + if mode is not None and isinstance(mode, theano.compile.ProfileMode): mode.print_summary() def print_diff_mode(a, b): if (a is not None and - isinstance(a, (theano.compile.ProfileMode,)) and - isinstance(b, (theano.compile.ProfileMode,))): + isinstance(a, theano.compile.ProfileMode) and + isinstance(b, theano.compile.ProfileMode)): a.print_diff_summary(b) @@ -96,7 +96,7 @@ out = tensor.tanh(tensor.dot(hid, v) + c) loss = tensor.sum(0.5 * (out - y) ** 2 * lr) if 0: - print 'loss type', loss.type + print('loss type', loss.type) params = [w, b, v, c] gparams = tensor.grad(loss, params) @@ -105,11 +105,11 @@ #print 'building pfunc ...' 
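Note on the test_mlp.py hunk here: 2to3 drops "from itertools import izip" and, in the hunk that follows, rewrites izip(params, gparams) to zip(params, gparams), because Python 3 has no itertools.izip; the built-in zip is already lazy. A minimal standalone sketch of the same pattern (plain Python, made-up values, not Theano code):

# Python 2: from itertools import izip; [(p, p - g) for p, g in izip(params, gparams)]
# Python 3: zip() is lazy, so 2to3 simply removes the import and renames the call.
params = [1.0, 2.0]
gparams = [0.1, 0.2]
updates = [(p, p - g) for p, g in zip(params, gparams)]
print(updates)  # [(1.0, 0.9), (2.0, 1.8)]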
train = pfunc([x, y, lr], [loss], mode=mode, - updates=[(p, p - g) for p, g in izip(params, gparams)]) + updates=[(p, p - g) for p, g in zip(params, gparams)]) if 0: for i, n in enumerate(train.maker.fgraph.toposort()): - print i, n + print(i, n) xval = my_rand(n_batch, n_in) yval = my_rand(n_batch, n_out) @@ -117,7 +117,7 @@ t0 = time.time() rval = [] - for i in xrange(n_train): + for i in range(n_train): rval.append(train(xval, yval, lr)) dt = time.time() - t0 @@ -210,7 +210,7 @@ yval = my_rand(n_batch, n_out) lr = theano._asarray(0.01, dtype='float32') - for i in xrange(n_train): + for i in range(n_train): rval = train(xval, yval, lr) #print 'training done' print_mode(mode) @@ -301,7 +301,7 @@ xval = my_rand(*shape_img) yval = my_rand(n_batch, n_out) # int32 make all 0... lr = theano._asarray(0.01, dtype='float32') - for i in xrange(n_train): + for i in range(n_train): rval = train(xval, yval, lr) print_mode(mode) @@ -328,7 +328,7 @@ isize1 = isize isize2 = isize - if isinstance(isize, (tuple, )): + if isinstance(isize, tuple): isize1 = isize[0] isize2 = isize[1] @@ -432,16 +432,16 @@ rvals = my_zeros(n_train) t0 = time.time() - for i in xrange(n_train): + for i in range(n_train): rvals[i] = train(xval, yval, lr)[0] t1 = time.time() print_mode(mode) if pickle and isinstance(mode, theano.compile.ProfileMode): import pickle - print "RefactoringTool: Refactored ./theano/sandbox/cuda/tests/test_memory.py BEGIN %s profile mode dump" % device - print pickle.dumps(mode) - print "END %s profile mode dump" % device + print("BEGIN %s profile mode dump" % device) + print(pickle.dumps(mode)) + print("END %s profile mode dump" % device) #print "%s time: %.3f" % (device, t1-t0) #print "estimated time for one pass through MNIST with %s: %f" % ( @@ -542,12 +542,12 @@ # Compare results if (verbose or not numpy.allclose(rval_cpu, rval_gpu, rtol=1e-5, atol=float_atol)): - print "At batch:", i + 1 - print "CPU:", rval_cpu - print "GPU:", rval_gpu - print "abs diff:", numpy.absolute(rval_gpu - rval_cpu) - print "rel diff:", numpy.absolute(( - rval_gpu - rval_cpu) / rval_gpu) + print("At batch:", i + 1) + print("CPU:", rval_cpu) + print("GPU:", rval_gpu) + print("abs diff:", numpy.absolute(rval_gpu - rval_cpu)) + print("rel diff:", numpy.absolute(( + rval_gpu - rval_cpu) / rval_gpu)) if not ignore_error: assert numpy.allclose(rval_cpu, rval_gpu, @@ -564,14 +564,14 @@ if pickle: if isinstance(cpu_mode, theano.compile.ProfileMode): import pickle - print "BEGIN CPU profile mode dump" - print pickle.dumps(cpu_mode) - print "END CPU profile mode dump" + print("BEGIN CPU profile mode dump") + print(pickle.dumps(cpu_mode)) + print("END CPU profile mode dump") if isinstance(gpu_mode, theano.compile.ProfileMode): import pickle - print "BEGIN GPU profile mode dump" - print pickle.dumps(gpu_mode) - print "END GPU profile mode dump" + print("BEGIN GPU profile mode dump") + print(pickle.dumps(gpu_mode)) + print("END GPU profile mode dump") #print "CPU time: %.3f, GPU time: %.3f, speed up %f" % ( # (time_cpu, time_gpu, time_cpu/time_gpu)) --- ./theano/sandbox/cuda/tests/test_memory.py (original) +++ ./theano/sandbox/cuda/tests/test_memory.py (refactored) @@ -70,42 +70,42 @@ # more_alloc1 if after the first compilation, more_alloc2 after the second. 
for dtype, more_alloc1, more_alloc2 in [("float32", 1, 4), ("float64", 0, 0)]: - print dtype + print(dtype) test_params = np.asarray(np.random.randn(np.prod(shapes)), dtype) some_vector = tensor.vector('some_vector', dtype=dtype) some_matrix = some_vector.reshape(shapes) mem1 = freemem() - print "Before shared variable", mem1 + print("Before shared variable", mem1) variables = cuda.shared_constructor(np.ones((shapes[1],), dtype='float32')) derp = tensor.sum(tensor.dot(some_matrix[:shapes[0]], variables)) - print "Shared took ", np.prod(variables.get_value( + print("Shared took ", np.prod(variables.get_value( borrow=True, - return_internal_type=True).shape) * 4 / 1024, "kB" + return_internal_type=True).shape) * 4 / 1024, "kB") mem2 = freemem() - print "Before compilation", mem2 + print("Before compilation", mem2) mem2_1 = freemem(extra_alloc=more_alloc1) mem2_2 = freemem(extra_alloc=more_alloc2) obj = theano.function([some_vector], derp, mode=mode_with_gpu) mem3 = freemem() - print "After function compilation 1", mem3 + print("After function compilation 1", mem3) assert mem2_1 == mem3, (mem2_1, mem3) grad_derp = tensor.grad(derp, some_vector) grad = theano.function([some_vector], grad_derp, mode=mode_with_gpu) mem4 = freemem() - print "After function compilation 2", mem4 + print("After function compilation 2", mem4) asserRefactoringTool: No changes to ./theano/sandbox/cuda/tests/test_gradient.py RefactoringTool: No changes to ./theano/sandbox/cuda/tests/test_driver.py RefactoringTool: Refactored ./theano/sandbox/cuda/tests/test_cuda_ndarray.py t mem2_2 == mem4, (mem2_2, mem4) for i in range(3): obj(test_params) - print "After function evaluation 1", freemem() + print("After function evaluation 1", freemem()) assert mem2_2 == freemem(), (mem2_2, freemem()) grad(test_params) - print "After function evaluation 2", freemem() + print("After function evaluation 2", freemem()) assert mem2_2 == freemem(), (mem2_2, freemem()) del obj @@ -113,11 +113,11 @@ #assert mem2 == freemem(), (mem2, freemem()) del grad - print "After deleting function 2", freemem() + print("After deleting function 2", freemem()) assert mem2 == freemem(), (mem2, freemem()) del derp, variables, grad_derp - print "After deleting shared variable and ref to it", freemem() + print("After deleting shared variable and ref to it", freemem()) assert mem1 == freemem(), (mem1, freemem()) @@ -137,7 +137,7 @@ # more_alloc1 if after the first compilation, more_alloc2 after the second. 
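Note on the test_memory.py and test_nnet.py hunks above: they are all instances of 2to3's print fixer. A bare print statement becomes a print() call, a trailing comma (which suppressed the newline) becomes end=' ', and "print >> stream, ..." becomes a file=stream argument. A minimal sketch of the three forms with made-up values:

import sys
mem1 = 1024
print("Before shared variable", mem1)        # was: print "Before shared variable", mem1
print("max err index:", 3, end=' ')          # was: print 'max err index:', 3,   (trailing comma)
print(3 % 2)                                 # continues on the same output line as the call above
print("ERROR", "details", file=sys.stderr)   # was: print >> sys.stderr, "ERROR", "details"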
for dtype, more_alloc1 in [("float32", 2), ("float64", 0)]: - print dtype + print(dtype) test_params = np.asarray(np.random.randn(np.prod(shapes)), dtype) some_vector = tensor.vector('some_vector', dtype=dtype) @@ -145,39 +145,39 @@ branch_select = tensor.iscalar() mem1 = freemem() - print "Before shared variable", mem1 + print("Before shared variable", mem1) variables = cuda.shared_constructor(np.ones((shapes[1],), dtype='float32')) derp = tensor.sum(tensor.dot(some_matrix[:shapes[0]], variables)) derp = ifelse.IfElse(1)(branch_select, derp, some_matrix[:shapes[0]].sum()) derp += 1 - print "Shared took ", np.prod(variables.get_value( + print("Shared took ", np.prod(variables.get_value( borrow=True, - return_internal_type=True).shape) * 4 / 1024, "kB" + return_internal_type=True).shape) * 4 / 1024, "kB") mem2 = freemem() - print "Before compilation", mem2 + print("Before compilation", mem2) mem2_1 = freemem(extra_alloc=more_alloc1) obj = theano.function([some_vector, branch_select], derp, mode=mode_with_gpu) #theano.printing.debugprint(obj, print_type=True) mem3 = freemem() - print "After function compilation 1", mem3 + print("After function compilation 1", mem3) assert mem2_1 == mem3, (mem2_1, mem3) for i in range(3): obj(test_params, 1) - print "After function evaluation branch true", freemem() + print("After function evaluation branch true", freemem()) assert mem2_1 == freemem(), (mem2_1, freemem()) obj(test_params, 0) - print "After function evaluation branch false", freemem() + print("After function evaluation branch false", freemem()) assert mem2_1 == freemem(), (mem2_1, freemem()) del obj - print "After deleting function 1", freemem() + print("After deleting function 1", freemem()) assert mem2 == freemem(), (mem2, freemem()) del derp, variables - print "After deleting shared variable and ref to it", freemem() + print("After deleting shared variable and ref to it", freemem()) assert mem1 == freemem(), (mem1, freemem()) --- ./theano/sandbox/cuda/tests/test_cuda_ndarray.py (original) +++ ./theano/sandbox/cuda/tests/test_cuda_ndarray.py (refactored) @@ -461,7 +461,7 @@ try: bb = b.reshape(shape_2) - except Exception, ValueError: + except Exception as ValueError: return assert False @@ -509,7 +509,7 @@ offset = 0 b_strides = b._strides - for i in xrange(len(b.shape)): + for i in rangRefactoringTool: Refactored ./theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py e(len(b.shape)): offset += (b.shape[i]-1) * b_strides[i] v._set_stride(i, -b_strides[i]) @@ -566,8 +566,8 @@ def _cmp(x,y): assert x.shape == y.shape if not numpy.all(x == y): - print x - print y + print(x) + print(y) assert numpy.all(x == y) def _cmpf(x,*y): @@ -746,7 +746,7 @@ # attempt to assign the ndarray b with setitem _a[:,1,1] = _b assert False - except ValueError, e: + except ValueError as e: #print e assert True @@ -755,7 +755,7 @@ # attempt to assign the ndarray b with setitem _a[1,1,:] = b assert False - except ValueError, e: + except ValueError as e: #print e assert True @@ -772,7 +772,7 @@ # attempt to assign the ndarray b with setitem _a[:,:,1] = _b assert False - except ValueError, e: + except ValueError as e: #print e assert True @@ -781,7 +781,7 @@ # attempt to assign the ndarray b with setitem _a[1,:,:] = b assert False - except ValueError, e: + except ValueError as e: #print e assert True @@ -798,7 +798,7 @@ # attempt to assign the ndarray b with setitem _a[1,:,:] = b assert False - except TypeError, e: + except TypeError as e: #print e assert True @@ -933,7 +933,7 @@ _a[0, :, :] = mat #a[0, :, :] = mat 
#assert numpy.allclose(numpy.asarray(_a), a) - except ValueError, e: + except ValueError as e: pass #test direct transfert from numpy with broadcast @@ -983,7 +983,7 @@ # Test that the 'base' attribute of a CudaNdarray is the one # built initially, not an intermediate one. a = cuda_ndarray.CudaNdarray.zeros((3,4,5)) - for i in xrange(5): + for i in range(5): b = a[:] assert b.base is a --- ./theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py (original) +++ ./theano/sandbox/cuda/tests/test_conv_cuda_ndarray.py (refactored) @@ -55,10 +55,10 @@ img.shape[2] - kern.shape[2] + 1, img.shape[3] - kern.shape[3] + 1) out = numpy.zeros(outshp, dtype='float32') - for b in xrange(out.shape[0]): - for k in xrange(out.shape[1]): - for rr in xrange(out.shape[2]): - for cc in xrange(out.shape[3]): + for b in range(out.shape[0]): + for k in range(out.shape[1]): + for rr in range(out.shape[2]): + for cc in range(out.shape[3]): #rr, cc is the upper-left corner of img patches imgpatch = img[b, :, rr:rr + kern.shape[2], cc:cc + kern.shape[3]] @@ -109,9 +109,9 @@ img.shape[2] + kern.shape[2] - 1, img.shape[3] + kern.shape[3] - 1) out = numpy.zeros(outshp, dtype='float32') - for b in xrange(out.shape[0]): - for k in xrange(out.shape[1]): - for s in xrange(img.shape[1]): + for b in range(out.shape[0]): + for k in range(out.shape[1]): + for s in range(img.shape[1]): out[b, k, :, :] += convolve2d(img[b, s, :, :], kern[k, s, :, :], mode) @@ -119,7 +119,7 @@ def _params_allgood_header(): - print "ishape kshape #Mflops CPU Mflops GPU Mflops Speedup" + print("ishape kshape #Mflops CPU Mflops GPU Mflops Speedup") def _params_allgood(ishape, kshape, mode, subsample=(1, 1), img_stride=(1, 1), @@ -189,14 +189,14 @@ assert (numpy.asarray(gpuval) == numpy.asarray(gpuval2)).all() gpuval = numpy.asarray(gpuval) if gpuval.shape != cpuval.shape: - print >> sys.stdout, "ERROR: shape mismatch", - print >> sys.stdout, gpuval.shape, cpuval.shape + print("ERROR: shape mismatch", end=' ', file=sys.stdout) + print(gpuval.shape, cpuval.shape, file=sys.stdout) rval = False if rval: rval = numpy.allclose(cpuval, gpuval, rtol=rtol) assert numpy.all(numpy.isfinite(gpuval)) - except NotImplementedError, e: - print >> sys.stdout, '_params_allgood Failed allclose', e + except NotImplementedError as e: + print('_params_allgood Failed allclose', e, file=sys.stdout) rval = False if (t2 is not None): @@ -209,38 +209,38 @@ cpu_mflops = approx_fp / (t1 - t0) gpu_mflops = approx_fp / (t2 - t1) if verbose > 0: - print >> sys.stdout, '%15s' % str(ishape), '%15s' % str(kshape), - print >> sys.stdout, '%12.5f %7.2f %7.2f %7.1f' % (approx_fp, - cpu_mflops, gpu_mflops, (t1 - t0) / (t2 - t1)) + print('%15s' % str(ishape), '%15s' % str(kshape), end=' ', file=sys.stdout) + print('%12.5f %7.2f %7.2f %7.1f' % (approx_fp, + cpu_mflops, gpu_mflops, (t1 - t0) / (t2 - t1)), file=sys.stdout) if not rval: - print >> sys.stdout, ('test_' + mode + ' id=' + str(id) + + print(('test_' + mode + ' id=' + str(id) + ' FAILED for ishape, kshape, mode, subsample,' + ' img_stride, kern_stride, version', ishape, kshape, mode, subsample, img_stride, kern_stride, - version) + version), file=sys.stdout) diff = cpuval - gpuval diffabs = numpy.absolute(diff) pr_diff = diffabs / numpy.absolute(cpuval) nb_close = (diffabs <= (atol + rtol * numpy.absolute(gpuval))).sum() - print "max absolute diff:", (diffabs.max(), "avg abs diff:", - numpy.average(diffabs)) - print "median abs diff:", (numpy.median(diffabs), "nb close:", - nb_close, "/", diff.size) - print "max relatif diff:", 
(pr_diff.max(), "avg rel diff:", - numpy.average(pr_diff)) + print("max absolute diff:", (diffabs.max(), "avg abs diff:", + numpy.average(diffabs))) + print("median abs diff:", (numpy.median(diffabs), "nb close:", + nb_close, "/", diff.size)) + print("max relatif diff:", (pr_diff.max(), "avg rel diff:", + numpy.average(pr_diff))) if not rval and print_ != False: if npy_img.shape[0] > 5: - print "img", npy_img[0] - print "kern", npy_kern[0] - print "gpu", gpuval[0][0] - print "cpu", cpuval[0][0] - print "diff", diff[0][0] + print("img", npy_img[0]) + print("kern", npy_kern[0]) + print("gpu", gpuval[0][0]) + print("cpu", cpuval[0][0]) + print("diff", diff[0][0]) else: - print "img", npy_img - print "kern", npy_kern - print "gpu", gpuval - print "cpu", cpuval - print "diff", diff + print("img", npy_img) + print("kern", npy_kern) + print("gpu", gpuval) + print("cpu", cpuval) + print("diff", diff) return rval @@ -273,9 +273,9 @@ print_=print_, rtol=rtol, ones=ones) - except Exception, e: - print ver, id, (ishape, kshape, subshape, istride, kstride) - print e + except Exception as e: + print(ver, id, (ishape, kshape, subshape, istride, kstride)) + print(e) pass if not ret: failed_version.add(ver) @@ -283,11 +283,11 @@ nb_failed += 1 nb_tests += 1 if nb_failed > 0: - print "nb_failed", nb_failed, "on", nb_tests, - print "failed_version", failed_version, "failed_id", failed_id + print("nb_failed", nb_faileRefactoringTool: No changes to ./theano/sandbox/cuda/tests/test_blas.py RefactoringTool: Refactored ./theano/sandbox/cuda/tests/test_bench_loopfusion.py d, "on", nb_tests, end=' ') + print("failed_version", failed_version, "failed_id", failed_id) assert nb_failed == 0, nb_failed else: - print 'Executed', nb_tests, 'different shapes' + print('Executed', nb_tests, 'different shapes') def get_basic_shapes(): @@ -801,8 +801,8 @@ gpuval = cuda_ndarray.conv(img, kern, mode, subsample) t2 = time.time() gpuval = numpy.asarray(gpuval) - print gpuval - print cpuval + print(gpuval) + print(cpuval) def benchmark(): --- ./theano/sandbox/cuda/tests/test_bench_loopfusion.py (original) +++ ./theano/sandbox/cuda/tests/test_bench_loopfusion.py (refactored) @@ -9,7 +9,7 @@ # so state is ignored # since this job is not restartable, channel is also ignored -import logging, StringIO, time, sys +import logging, io, time, sys import numpy @@ -74,12 +74,12 @@ if use_softmax_w: w = shared_uniform(low=-.1, high=.1, size=(n_out, n_terms), name='Kouh2008::w') w_sm = theano.tensor.nnet.softmax(w) - w_list = [w_sm[:,i] for i in xrange(n_terms)] + w_list = [w_sm[:,i] for i in range(n_terms)] w_l1 = abs(w).sum() w_l2_sqr = (w**2).sum() else: w_list = [shared_uniform(low=-2.0/n_terms, high=2.0/n_terms, size=(n_out,), name='w_%i'%i) - for i in xrange(n_terms)] + for i in range(n_terms)] w_l1 = sum(abs(wi).sum() for wi in w_list) w_l2_sqr = sum((wi**2).sum() for wi in w_list) @@ -158,15 +158,15 @@ return _shared_uniform(rng, low, high, size, dtype, name) f_list = [shared_uniform(low=-2.0/numpy.sqrt(n_in), high=2.0/numpy.sqrt(n_in), size=(n_in, n_out), name='f_%i'%i) - for i in xrange(n_terms)] + for i in range(n_terms)] b_list = [shared_uniform(low=0, high=.01, size=(n_out,), name='b_%i'%i) - for i in xrange(n_terms)] + for i in range(n_terms)] #x_list = [theano._asarray(eps, dtype=dtype)+softplus(tensor.dot(input, f_list[i])) for i in xrange(n_terms)] filter_range = theano._asarray(filter_range, dtype=dtype) half_filter_range = theano._asarray(filter_range/2, dtype=dtype) x_list = [theano._asarray(filter_range + eps, 
dtype=dtype)+half_filter_range *softsign(tensor.dot(input, f_list[i]) + - b_list[i]) for i in xrange(n_terms)] + b_list[i]) for i in range(n_terms)] rval = cls.new_expbounds(rng, x_list, n_out, dtype=dtype, params=f_list + b_list, exponent_range=exponent_range) @@ -202,10 +202,10 @@ def pixel_range(x): return 255 * (x - x.min()) / (x.max() - x.min() + eps) - for r in xrange(rows): + for r in range(rows): out_r_low = r*(row_gap + filter_shape[0]) out_r_high = out_r_low + filter_shape[0] - for c in xrange(cols): + for c in range(cols): out_c_low = c*(col_gap + filter_shape[1]) out_c_high = out_c_low + filter_shape[1] out_tile = out_array[out_r_low:out_r_high, out_c_low:out_c_high,:] @@ -218,14 +218,14 @@ if w_col < w.shape[1]: #filters after the 3rd do not get rendered, but are skipped over. # there are only 3 colour channels. - for i in xrange(min(self.n_E_quadratic,3)): + for i in range(min(self.n_E_quadratic,3)): out_tile[:,:,i] = pixel_range(w[:,w_col+i]).reshape(filter_shape) w_col += self.n_E_quadratic if c % 3 == 2: # S filters if w_col < w.shape[1]: #filters after the 3rd do not get rendered, but are skipped over. # there are only 3 colour channels. - for i in xrange(min(self.n_S_quadratic,3)): + for i in range(min(self.n_S_quRefactoringTool: Refactored ./theano/sandbox/cuda/tests/test_basic_ops.py adratic,3)): out_tile[:,:,2-i] = pixel_range(w[:,w_col+i]).reshape(filter_shape) w_col += self.n_S_quadratic return Image.fromarray(out_array, 'RGB') @@ -307,7 +307,7 @@ assert cost.type.ndim == 0 - print layer.params + print(layer.params) gparams = theano.tensor.grad(cost, layer.params) updates = [(p, p - s_lr*gp) for p, gp in zip(layer.params, gparams)] @@ -319,5 +319,5 @@ dtype=conf.dtype2, ) yval = numpy.arange(conf.ft_batchsize) - for i in xrange(n_iter): + for i in range(n_iter): train_nll(xval, yval, conf.lr) --- ./theano/sandbox/cuda/tests/test_basic_ops.py (original) +++ ./theano/sandbox/cuda/tests/test_basic_ops.py (refactored) @@ -146,7 +146,7 @@ f_caused_value_error = False try: f_out = f(val) - except ValueError, e: + except ValueError as e: exc = e f_caused_value_error = True except NotImplementedError: @@ -158,23 +158,23 @@ f2_caused_value_error = False try: f2_out = f2(val) - except ValueError, e: + except ValueError as e: exc2 = e f2_caused_value_error = True if f_caused_value_error != f2_caused_value_error: if f_caused_value_error: - print 'f caused this value error:' - print exc + print('f caused this value error:') + print(exc) else: - print 'f did not raise a value error, but should have' + print('f did not raise a value error, but should have') if f2_caused_value_error: - print 'f2 caused this value error:' - print exc2 + print('f2 caused this value error:') + print(exc2) else: - print 'f should not have raised a value error' - print 'shape was: ', shape - print 'pattern was: ', pattern + print('f should not have raised a value error') + print('shape was: ', shape) + print('pattern was: ', pattern) assert False try: @@ -204,7 +204,7 @@ pat = tensor_pattern_to_gpu_pattern(shape, pattern) a = tensor.TensorType('float32', (False,) * len(shape))() - dim_pattern = range(len(shape)) + dim_pattern = list(range(len(shape))) dim_pattern[0] = 1 dim_pattern[1] = 0 a = a.dimshuffle(dim_pattern) @@ -349,7 +349,7 @@ f = pfunc([b], [], updates=[(a, a + b)], mode=mode_with_gpu) #check that we work inplace. 
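Note on the test_basic_ops.py and test_cuda_ndarray.py hunks: 2to3's except fixer rewrites the Python 2 form "except ValueError, e:", which is a syntax error in Python 3, as "except ValueError as e:". A minimal sketch (plain Python, no Theano):

try:
    [].remove(0)             # raises ValueError
except ValueError as e:      # Python 2 spelling was: except ValueError, e:
    print("caught:", e)

One of the converted hunks, "except Exception, ValueError:" in test_cuda_ndarray.py, was already dubious in Python 2, since it binds the caught exception to the name ValueError; 2to3 converts it mechanically to "except Exception as ValueError:", which still shadows the built-in.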
- assert f.maker.fgraph.toposort()[1].op.destroy_map.items() == [(0, [0])] + assert list(f.maker.fgraph.toposort()[1].op.destroy_map.items()) == [(0, [0])] a0 = a.get_value() * 1.0 f(numpy.ones((4, 4), dtype='float32')) @@ -951,9 +951,9 @@ # The variable fast is used to set the member perform_using_take of # the Op. It is only useful for testing that we use the fast # version when we should. Users should not use it. - for shape, idx, fast in [((70000,), range(70000), True), - ((70000, 5), range(70000), True), - ((70000, 2, 3), range(70000), True), + for shape, idx, fast in [((70000,), list(range(70000)), True), + ((70000, 5), list(range(70000)), True), + ((70000, 2, 3), list(range(70000)), True), ((1025, 1025), [5, 10], True), ((3, 1025, 1026), [1, 2], True), ((1025, 67000), [5, 10], True), @@ -1080,11 +1080,11 @@ for nb_dim in [2, 3, 4, 5]: RefactoringTool: No changes to ./theano/sandbox/cuda/rng_curand.py RefactoringTool: Refactored ./theano/sandbox/cuda/opt.py shapes = [rng.randint(1, 5) for i in range(nb_dim)] args = [numpy.cast['float32'](rng.randn(*shapes)) - for arg in xrange(0, num_args)] + for arg in range(0, num_args)] symb_args = [theano.tensor.TensorType('float32', (False,)*nb_dim)() - for arg in xrange(0, num_args)] + for arg in range(0, num_args)] outputs = [] @@ -1252,7 +1252,7 @@ f = theano.function([vec], var[vec], mode=mode_with_gpu) for i in range(100): f(idx) - print "ProfileMode with batch size", batch_size + print("ProfileMode with batch size", batch_size) mode_with_gpu.print_summary() if __name__ == '__main__': --- ./theano/sandbox/cuda/opt.py (original) +++ ./theano/sandbox/cuda/opt.py (refactored) @@ -1,4 +1,5 @@ import logging +from functools import reduce _logger = logging.getLogger('theano.sandbox.cuda.opt') import copy @@ -623,8 +624,7 @@ if rval.type == node.outputs[0].type: return [rval] else: - print >> sys.stderr, \ - "WARNING: local_gpu_careduce got type wrong" + print("WARNING: local_gpu_careduce got type wrong", file=sys.stderr) return None else: @@ -640,7 +640,7 @@ new_in_shp = [x_shape[0]] new_mask = [reduce_mask[0]] - for i in xrange(1, x.type.ndim): + for i in range(1, x.type.ndim): if reduce_mask[i] == reduce_mask[i - 1]: new_in_shp[-1] *= x_shape[i] else: @@ -663,8 +663,7 @@ if unreshaped_reduce.type == node.outputs[0].type: return [unreshaped_reduce] else: - print >> sys.stderr, \ - "WARNING: local_gpu_careduce got type wrong" + print("WARNING: local_gpu_careduce got type wrong", file=sys.stderr) return None return False @@ -1220,7 +1219,7 @@ assert int_size == gpu_int_size del gpu_int_size del t - except Exception, e: + except Exception as e: _logger.warning(("Optimization Warning: " "Got the following error, but we can ignore it. 
" "This could cause less GpuElemwise fused together.\n" @@ -1284,7 +1283,7 @@ return False while len(node.inputs) > max_nb_inputs: inner_op = [] - for i in xrange(0, + for i in range(0, len(node.inputs), max_nb_inputs): inner_op.append(node.op(*node.inputs[i: i + max_nb_inputs])) @@ -1530,8 +1529,8 @@ scan_outs = [safe_to_gpu(x) for x in thescan.outputs] scan_outs = scan_utils.clone( scan_outs, - replace=zip(thescan.inputs, - [safe_to_cpu(x) for x in scan_ins])) + replace=list(zip(thescan.inputs, + [safe_to_cpu(x) for x in scan_ins]))) # We need to construct the hash here, because scan # __init__ does not know about cuda ndarray and can not # handle graphs with inputs being Cuda Ndarrays @@ -1578,8 +1577,8 @@ scan_outs = [safe_to_gpu(x) for x in thescan.outputs] scan_outs = scan_utils.clone( scan_outs, - replace=zip(thescan.inputs, - [safe_to_cpu(x) for x in scan_ins])) + replace=list(zip(thescan.inputs, + [safe_to_cpu(x) for x in scan_ins]))) # We need to construct the hash here, because sRefactoringTool: Refactored ./theano/sandbox/cuda/nvcc_compiler.py RefactoringTool: Refactored ./theano/sandbox/cuda/nnet.py RefactoringTool: No changes to ./theano/sandbox/cuda/neighbours.py RefactoringTool: No changes to ./theano/sandbox/cuda/kernel_codegen.py RefactoringTool: Refactored ./theano/sandbox/cuda/elemwise.py can # __init__ does not know about cuda ndarray and can not --- ./theano/sandbox/cuda/nvcc_compiler.py (original) +++ ./theano/sandbox/cuda/nvcc_compiler.py (refactored) @@ -1,4 +1,4 @@ -import commands +import subprocess import distutils import logging import os @@ -271,7 +271,7 @@ # On the mac, nvcc is not able to link using -framework # Python, so we have manually add the correct library and # paths - darwin_python_lib = commands.getoutput('python-config --ldflags') + darwin_python_lib = subprocess.getoutput('python-config --ldflags') else: # sometimes, the linker cannot find -lpython so we need to tell it # explicitly where it is located @@ -359,7 +359,7 @@ cmd.pop(indexof) # Remove -framework cmd.pop(indexof) # Remove argument to -framework cmd.extend(newarg) - except ValueError, e: + except ValueError as e: done = True # Remove "-u Symbol" arguments, since they are usually not @@ -372,7 +372,7 @@ indexof = cmd.index('-u') cmd.pop(indexof) # Remove -u cmd.pop(indexof) # Remove argument to -u - except ValueError, e: + except ValueError as e: done = True # Fix for MacOS X. 
@@ -411,8 +411,8 @@ if p.returncode: for i, l in enumerate(src_code.split('\n')): - print >> sys.stderr, i + 1, l - print >> sys.stderr, '===============================' + print(i + 1, l, file=sys.stderr) + print('===============================', file=sys.stderr) # filter the output from the compiler for l in nvcc_stderr.split('\n'): if not l: @@ -426,17 +426,17 @@ continue except Exception: pass - print >> sys.stderr, l - print nvcc_stdout - print cmd + print(l, file=sys.stderr) + print(nvcc_stdout) + print(cmd) raise Exception('nvcc return status', p.returncode, 'for cmd', ' '.join(cmd)) elif config.cmodule.compilation_warning and nvcc_stdout: - print nvcc_stdout + print(nvcc_stdout) if nvcc_stdout: # this doesn't happen to my knowledge - print >> sys.stderr, "DEBUG: nvcc STDOUT", nvcc_stdout + print("DEBUG: nvcc STDOUT", nvcc_stdout, file=sys.stderr) if py_module: #touch the __init__ file --- ./theano/sandbox/cuda/nnet.py (original) +++ ./theano/sandbox/cuda/nnet.py (refactored) @@ -94,7 +94,7 @@ classname = self.__class__.__name__ fail = sub['fail'] sio = StringIO() - print >> sio, """ + print(""" if (%(y_idx)s->nd != 1) { PyErr_SetString(PyExc_ValueError, "y_idx not 1d tensor"); @@ -206,7 +206,7 @@ %(fail)s; } } - """ % locals() + """ % locals(), file=sio) return sio.getvalue() def c_code_cache_version(self): --- ./theano/sandbox/cuda/elemwise.py (original) +++ ./theano/sandbox/cuda/elemwise.py (refactored) @@ -78,57 +78,57 @@ #print 'C_SRC_KERNEL', sio.getvalue() for ipos, i in enumerate(node.inputs): - print >> sio, "// Input ", ipos, str(i.type) + print("// Input ", ipos, str(i.type), file=sio) for ipos, i in enumerate(node.outputs): - print >> sio, "// Output ", ipos, str(i.type) - print >> sio, "static __global__ void kernel_%s_%s_%s(unsigned int numEls" % ( - self.scalar_op.__class__.__name__, nodename, nd) + print("// Output ", ipos, str(i.type), file=sio) + print("static __global__ void kernel_%s_%s_%s(unsigned int numEls" % ( + self.scalar_op.__class__.__name__, nodename, nd), file=sio) if (nd): - print >> sio, "\t,", ", ".join("const int dim%i" % i - for i in xrange(nd)) + print("\t,", ", ".join("const int dim%i" % i + for i in range(nd)), file=sio) #declare inputs for ipos, i in enumerate(node.inputs): s = ", ".join(["const float * i%i_data" % ipos] + - ["int i%i_str_%i" % (ipos, d) for d in xrange(nd)]) - print >> sio, "\t,", s + ["int i%i_str_%i" % (ipos, d) for d in range(nd)]) + print("\t,", s, file=sio) #declare outputs for ipos, i in enumerate(node.outputs): s = ", ".join(["float * o%i_data" % ipos] + - ["int o%i_str_%i" % (ipos, d) for d in xrange(nd)]) - print >> sio, "\t,", s + ["int o%i_str_%i" % (ipos, d) for d in range(nd)]) + print("\t,", s, file=sio) #print >> sio, "\t,", ", ".join("int o%i_str_%i" % (ipos, d) for d in xrange(nd)) #print >> sio, "\t,", "float * o%i_data" % ipos - print >> sio, "\t)\n{" - print >> sio, " const int idx = blockIdx.x * blockDim.x + threadIdx.x;" - print >> sio, " const int numThreads = blockDim.x * gridDim.x;" + print("\t)\n{", file=sio) + print(" const int idx = blockIdx.x * blockDim.x + threadIdx.x;", file=sio) + print(" const int numThreads = blockDim.x * gridDim.x;", file=sio) # For each input that is a scalar which has been broadcasted to a tensor, # load it into a local variable for ipos, i in enumerate(node.inputs): if _logical_scalar(i): - print >> sio, " const float ii_i%i_value = i%i_data[0];" % (ipos, ipos) + print(" const float ii_i%i_value = i%i_data[0];" % (ipos, ipos), file=sio) #loop over the elements to be 
treated by this kernel call - print >> sio, " for (int i = idx; i < numEls; i += numThreads) {" + print(" for (int i = idx; i < numEls; i += numThreads) {", file=sio) # calculate the data pointers for all arguments - print >> sio, " int ii = i;" + print(" int ii = i;", file=sio) for ipos, i in enumerate(node.inputs): if not _logical_scalar(i): - print >> sio, " const float * ii_i%i_data = i%i_data;" % (ipos, ipos) + print(" const float * ii_i%i_data = i%i_data;" % (ipos, ipos), file=sio) for ipos, i in enumerate(node.outputs): - print >> sio, " float * ii_o%i_data = o%i_data;" % (ipos, ipos) - for d in xrange(nd-1, -1, -1): + print(" float * ii_o%i_data = o%i_data;" % (ipos, ipos), file=sio) + for d in range(nd-1, -1, -1): if d > 0: - print >> sio, " int pos%i = ii %% dim%i;" %(d, d) - print >> sio, " ii = ii / dim%i;" %d + print(" int pos%i = ii %% dim%i;" %(d, d), file=sio) + print(" ii = ii / dim%i;" %d, file=sio) else: - print >> sio, " int pos%i = ii;" %d + print(" int pos%i = ii;" %d, file=sio) for ipos, i in enumerate(node.inputs): if not _logical_scalar(i): - print >> sio, " ii_i%i_data += pos%i * i%i_str_%i;" % (ipos, d, ipos, d) + print(" ii_i%i_data += pos%i * i%i_str_%i;" % (ipos, d, ipos, d), file=sio) for ipos, i in enumerate(node.outputs): - print >> sio, " ii_o%i_data += pos%i * o%i_str_%i;" % (ipos, d, ipos, d) + print(" ii_o%i_data += pos%i * o%i_str_%i;" % (ipos, d, ipos, d), file=sio) # perform the scalar operation on the input and output references #TODO: What if the scalar_op needs support_code?? @@ -142,13 +142,13 @@ get_str_list_logical_scalar(node), ['ii_o%i_data[0]' % ipos for ipos, i in enumerate(node.outputs)], sub=dict(fail='return;')) # TODO: set a failure code somehow!!! - print >> sio, " ", task_code - print >> sio, " }" + print(" ", task_code, file=sio) + print(" }", file=sio) #indent = " "*(4*d+7) #for ipos, i in enumerate(node.inputs): #print >> sio, indent, "const float * i%i" % ipos, '= i%i_data', '' - print >> sio, "}" + print("}", file=sio) #print sio.getvalue() return sio.getvalue() @@ -187,44 +187,44 @@ if nd in (4,): # print some leading comments to make the code easier to read for ipos, i in enumerate(node.inputs): - print >> sio, "// Input ", ipos, str(i.type) + print("// Input ", ipos, str(i.type), file=sio) for ipos, i in enumerate(node.outputs): - print >> sio, "// Output ", ipos, str(i.type) - print >> sio, "static __global__ void kernel_%s_%s_%s(unsigned int numEls" %( + print("// Output ", ipos, str(i.type), file=sio) + print("static __global__ void kernel_%s_%s_%s(unsigned int numEls" %( self.scalar_op.__class__.__name__, nodename, - 'tiling%i'%nd) + 'tiling%i'%nd), file=sio) if (nd): - print >> sio, "\t,", ", ".join("const int dim%i" % i for i in xrange(nd)) + print("\t,", ", ".join("const int dim%i" % i for i in range(nd)), file=sio) #declare inputs for ipos, i in enumerate(node.inputs): - s = ", ".join(["const float * i%i_data" % ipos] + list("int i%i_str_%i" % (ipos, d) for d in xrange(nd))) - print >> sio, "\t,", s + s = ", ".join(["const float * i%i_data" % ipos] + list("int i%i_str_%i" % (ipos, d) for d in range(nd))) + print("\t,", s, file=sio) #declare outputs for ipos, i in enumerate(node.outputs): - s = ", ".join(["float * o%i_data" % ipos] + list("int o%i_str_%i" % (ipos, d) for d in xrange(nd))) - print >> sio, "\t,", s + s = ", ".join(["float * o%i_data" % ipos] + list("int o%i_str_%i" % (ipos, d) for d in range(nd))) + print("\t,", s, file=sio) #print >> sio, "\t,", ", ".join("int o%i_str_%i" % (ipos, d) for d in xrange(nd)) 
#print >> sio, "\t,", "float * o%i_data" % ipos - print >> sio, "\t)\n{" + print("\t)\n{", file=sio) # For each input that is a scalar which has been broadcasted to a tensor, # load it into a local variable - print >> sio, " __shared__ float value0[%i];" % len(node.inputs) - print >> sio, " __shared__ int shared_dims[%(nd)s];" % locals() + print(" __shared__ float value0[%i];" % len(node.inputs), file=sio) + print(" __shared__ int shared_dims[%(nd)s];" % locals(), file=sio) #print >> sio, " __shared__ int shared_i_str[%(n_in)s][%(nd)s]" - print >> sio, " if ((threadIdx.x == 0) && (threadIdx.y == 0)) {" + print(" if ((threadIdx.x == 0) && (threadIdx.y == 0)) {", file=sio) for ipos, i in enumerate(node.inputs): if _logical_scalar(i): - print >> sio, " value0[%i] = i%i_data[0];" % (ipos, ipos) - for ipos in xrange(nd): - print >> sio, " shared_dims[%i] = dim%i;" % (ipos, ipos) - print >> sio, " }" - print >> sio, " __syncthreads();" + print(" value0[%i] = i%i_data[0];" % (ipos, ipos), file=sio) + for ipos in range(nd): + print(" shared_dims[%i] = dim%i;" % (ipos, ipos), file=sio) + print(" }", file=sio) + print(" __syncthreads();", file=sio) if (nd == 4): - print >> sio, """ + print(""" for (int pos0 = blockIdx.x; pos0 < shared_dims[0]; pos0 += gridDim.x) { for (int pos1 = blockIdx.y; pos1 < shared_dims[1]; pos1 += gridDim.y) @@ -235,21 +235,21 @@ //for (int pos3 = threadIdx.y; pos3 < shared_dims[3]; pos3 += blockDim.y) for (int pos3 = threadIdx.x; pos3 < shared_dims[3]; pos3 += blockDim.x) { - """ + """, file=sio) else: raise NotImplementedError() for ipos, i in enumerate(node.inputs): if not _logical_scalar(i): - print >> sio, " const float * ii_i%i_data = i%i_data;" % (ipos, ipos) + print(" const float * ii_i%i_data = i%i_data;" % (ipos, ipos), file=sio) for ipos, i in enumerate(node.outputs): - print >> sio, " float * ii_o%i_data = o%i_data;" % (ipos, ipos) - for d in xrange(nd): + print(" float * ii_o%i_data = o%i_data;" % (ipos, ipos), file=sio) + for d in range(nd): for ipos, i in enumerate(node.inputs): if not _logical_scalar(i): - print >> sio, " ii_i%i_data += pos%i * i%i_str_%i;" % (ipos, d, ipos, d) + print(" ii_i%i_data += pos%i * i%i_str_%i;" % (ipos, d, ipos, d), file=sio) for ipos, i in enumerate(node.outputs): - print >> sio, " ii_o%i_data += pos%i * o%i_str_%i;" % (ipos, d, ipos, d) + print(" ii_o%i_data += pos%i * o%i_str_%i;" % (ipos, d, ipos, d), file=sio) # perform the scalar operation on the input and output references #TODO: What if the scalar_op needs support_code?? @@ -261,9 +261,9 @@ , get_str_list_logical_scalar(node, value_str='value0[%i]') , ['ii_o%i_data[0]'%ipos for ipos, i in enumerate(node.outputs)] , sub=dict(fail='return;')) #TODO: set a failure code somehow!!! 
- print >> sio, " ", task_code - - print >> sio, " }" * nd + print(" ", task_code, file=sio) + + print(" }" * nd, file=sio) #TODO: insert runtime stride checks that select the best loop order either here, or in # the host code that launched the kernel (host code probably better spot) @@ -271,9 +271,9 @@ #indent = " "*(4*d+7) #for ipos, i in enumerate(node.inputs): #print >> sio, indent, "const float * i%i" % ipos, '= i%i_data', '' - print >> sio, "}" - - print sio.getvalue() + print("}", file=sio) + + print(sio.getvalue()) return sio.getvalue() def c_src_kernel_tiling_less_registers(self, node, nodename): @@ -289,26 +289,26 @@ # print some leading comments to make the code easier to read for ipos, i in enumerate(node.inputs): - print >> sio, "// Input ", ipos, str(i.type) + print("// Input ", ipos, str(i.type), file=sio) for ipos, i in enumerate(node.outputs): - print >> sio, "// Output ", ipos, str(i.type) - print >> sio, "static __global__ void kernel_%s_%s_%s(unsigned int numEls" %( + print("// Output ", ipos, str(i.type), file=sio) + print("static __global__ void kernel_%s_%s_%s(unsigned int numEls" %( self.scalar_op.__class__.__name__, nodename, - 'tiling%i_less_registers'%nd) + 'tiling%i_less_registers'%nd), file=sio) if (nd): - print >> sio, "\t,", ", ".join("const int dim%i" % i for i in xrange(nd)) + print("\t,", ", ".join("const int dim%i" % i for i in range(nd)), file=sio) #declare inputs for ipos, i in enumerate(node.inputs): - s = ", ".join(["const float * i%i_data_0" % ipos] + list("int i%i_str_%i" % (ipos, d) for d in xrange(nd))) - print >> sio, "\t,", s + s = ", ".join(["const float * i%i_data_0" % ipos] + list("int i%i_str_%i" % (ipos, d) for d in range(nd))) + print("\t,", s, file=sio) #declare outputs for ipos, i in enumerate(node.outputs): - s = ", ".join(["float * o%i_data_0" % ipos] + list("int o%i_str_%i" % (ipos, d) for d in xrange(nd))) - print >> sio, "\t,", s + s = ", ".join(["float * o%i_data_0" % ipos] + list("int o%i_str_%i" % (ipos, d) for d in range(nd))) + print("\t,", s, file=sio) #print >> sio, "\t,", ", ".join("int o%i_str_%i" % (ipos, d) for d in xrange(nd)) #print >> sio, "\t,", "float * o%i_data" % ipos - print >> sio, "\t)\n{" + print("\t)\n{", file=sio) # TODO: Setting these to true makes the function fail SOMETIMES. I don't know why yet. 
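Note on the elemwise.py hunks above: they all follow one code-generation idiom, accumulating the CUDA kernel source line by line in a StringIO buffer, and 2to3 rewrites every "print >> sio, ..." as "print(..., file=sio)". A minimal sketch of that idiom with a made-up kernel name (the test_bench_loopfusion.py hunk earlier shows the matching module rename, StringIO to io):

from io import StringIO

sio = StringIO()
nd = 2
print("static __global__ void kernel_demo(unsigned int numEls", file=sio)
print("\t,", ", ".join("const int dim%i" % i for i in range(nd)), file=sio)
print("\t)\n{", file=sio)
print("}", file=sio)
print(sio.getvalue())   # the accumulated C source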
use_shared_stride = False @@ -316,7 +316,7 @@ def decl_limits(nd): if use_shared_limits: - print >> sio, "__shared__ float * limits[%(nd)s];" % locals() + print("__shared__ float * limits[%(nd)s];" % locals(), file=sio) def stride(io, p, d): if use_shared_stride: @@ -332,71 +332,71 @@ def decl_shared_stride(nin, nout, nd): if not use_shared_stride: return - print >> sio, """ + print(""" __shared__ int si_str[%(nin)s][%(nd)s]; __shared__ int so_str[%(nout)s][%(nd)s]; if ((threadIdx.x == 0) && (threadIdx.y == 0)) { - """ % locals() - for i in xrange(nin): - for d in xrange(nd): - print >> sio, "si_str[%(i)s][%(d)s] = i%(i)s_str_%(d)s;" %locals() - for i in xrange(n_out): - for d in xrange(nd): - print >> sio, "so_str[%(i)s][%(d)s] = o%(i)s_str_%(d)s;" %locals() - print >> sio, "} __syncthreads();" + """ % locals(), file=sio) + for i in range(nin): + for d in range(nd): + print("si_str[%(i)s][%(d)s] = i%(i)s_str_%(d)s;" %locals(), file=sio) + for i in range(n_out): + for d in range(nd): + print("so_str[%(i)s][%(d)s] = o%(i)s_str_%(d)s;" %locals(), file=sio) + print("} __syncthreads();", file=sio) def calc_limit(d): s = stride('o', 0, d) lname = limits(d) if use_shared_limits: - print >> sio, "if ((threadIdx.x == 0) && (threadIdx.y == 0)) {" + print("if ((threadIdx.x == 0) && (threadIdx.y == 0)) {", file=sio) if d == 0: - print >> sio, "%(lname)s = o0_data_0 + dim%(d)s * %(s)s;" % locals() + print("%(lname)s = o0_data_0 + dim%(d)s * %(s)s;" % locals(), file=sio) else: dm1 = d - 1 - print >> sio, "%(lname)s = o0_data_%(dm1)s + dim%(d)s * %(s)s;" % locals() - print >> sio, "} __syncthreads();" + print("%(lname)s = o0_data_%(dm1)s + dim%(d)s * %(s)s;" % locals(), file=sio) + print("} __syncthreads();", file=sio) else: if d == 0: - print >> sio, "const float * %(lname)s = o0_data_0 + dim%(d)s * %(s)s;" % locals() + print("const float * %(lname)s = o0_data_0 + dim%(d)s * %(s)s;" % locals(), file=sio) else: dm1 = d - 1 - print >> sio, "const float * %(lname)s = o0_data_%(dm1)s + dim%(d)s * %(s)s;" % locals() + print("const float * %(lname)s = o0_data_%(dm1)s + dim%(d)s * %(s)s;" % locals(), file=sio) def decl_ptrs(d, offset): dm1 = d - 1 assert dm1 >= 0 - for i in xrange(n_in): + for i in range(n_in): s = stride('i', i, d) - print >> sio, "const float * i%(i)s_data_%(d)s = i%(i)s_data_%(dm1)s + %(offset)s * %(s)s;" %locals() - for i in xrange(n_out): + print("const float * i%(i)s_data_%(d)s = i%(i)s_data_%(dm1)s + %(offset)s * %(s)s;" %locals(), file=sio) + for i in range(n_out): s = stride('o', i, d) - print >> sio, "float * o%(i)s_data_%(d)s = o%(i)s_data_%(dm1)s + %(offset)s * %(s)s;" %locals() + print("float * o%(i)s_data_%(d)s = o%(i)s_data_%(dm1)s + %(offset)s * %(s)s;" %locals(), file=sio) def inc_ptrs(d, amt): - for i in xrange(n_in): + for i in range(n_in): s = stride('i', i, d) - print >> sio, "i%(i)s_data_%(d)s += %(amt)s * %(s)s;" %locals() - for i in xrange(n_out): + print("i%(i)s_data_%(d)s += %(amt)s * %(s)s;" %locals(), file=sio) + for i in range(n_out): s = stride('o', i, d) - print >> sio, "o%(i)s_data_%(d)s += %(amt)s * %(s)s;" %locals() + print("o%(i)s_data_%(d)s += %(amt)s * %(s)s;" %locals(), file=sio) def while_limit(d): lname = limits(d) - print >> sio, "while (o0_data_%(d)s < %(lname)s) { " % locals() + print("while (o0_data_%(d)s < %(lname)s) { " % locals(), file=sio) def end_while(d): - print >> sio, "}" + print("}", file=sio) def task_code(d): - print >> sio, self.scalar_op.c_code( + print(self.scalar_op.c_code( Apply(self.scalar_op, [scalar.Scalar(dtype = 
input.type.dtype)() for input in node.inputs], [scalar.Scalar(dtype = output.type.dtype)() for output in node.outputs]) , nodename + '_scalar_' , ['i%i_data_%i[0]'%(ipos,d) for ipos, i in enumerate(node.inputs)] , ['o%i_data_%i[0]'%(ipos,d) for ipos, i in enumerate(node.outputs)] - , sub=dict(fail='return;')) #TODO: set a failure code somehow!!! + , sub=dict(fail='return;')), file=sio) #TODO: set a failure code somehow!!! if nd == 4: decl_shared_stride(n_in, n_out, nd) @@ -427,8 +427,8 @@ inc_ptrs(0, 'gridDim.x') end_while(0) - print >> sio, "}" - print sio.getvalue() + print("}", file=sio) + print(sio.getvalue()) return sio.getvalue() def c_src_kernel_Ccontiguous(self, node, nodename): @@ -437,29 +437,29 @@ #print 'C_SRC_KERNEL', sio.getvalue() for ipos, i in enumerate(node.inputs): - print >> sio, "// Input ", ipos, str(i.type) + print("// Input ", ipos, str(i.type), file=sio) for ipos, i in enumerate(node.outputs): - print >> sio, "// Output ", ipos, str(i.type) - print >> sio, "static __global__ void kernel_%s_%s_Ccontiguous (unsigned int numEls" %(self.scalar_op.__class__.__name__,nodename) + print("// Output ", ipos, str(i.type), file=sio) + print("static __global__ void kernel_%s_%s_Ccontiguous (unsigned int numEls" %(self.scalar_op.__class__.__name__,nodename), file=sio) #declare inputs for ipos, i in enumerate(node.inputs): - print >> sio, "\t,", "const float * i%i_data" % ipos + print("\t,", "const float * i%i_data" % ipos, file=sio) #declare outputs for ipos, i in enumerate(node.outputs): - print >> sio, "\t,", "float * o%i_data" % ipos - print >> sio, "\t)\n{" - print >> sio, " const int idx = blockIdx.x * blockDim.x + threadIdx.x;" - print >> sio, " const int numThreads = blockDim.x * gridDim.x;" + print("\t,", "float * o%i_data" % ipos, file=sio) + print("\t)\n{", file=sio) + print(" const int idx = blockIdx.x * blockDim.x + threadIdx.x;", file=sio) + print(" const int numThreads = blockDim.x * gridDim.x;", file=sio) # For each input that is a scalar which has been broadcasted to a tensor, # load it into a local variable for ipos, i in enumerate(node.inputs): if _logical_scalar(i): - print >> sio, " const float ii_i%i_value = i%i_data[0];" % (ipos, ipos) + print(" const float ii_i%i_value = i%i_data[0];" % (ipos, ipos), file=sio) #loop over the elements to be treated by this kernel call - print >> sio, " for (int i = idx; i < numEls; i += numThreads) {" + print(" for (int i = idx; i < numEls; i += numThreads) {", file=sio) # perform the scalar operation on the input and output references #TODO: What if the scalar_op needs support_code?? task_code = self.scalar_op.c_code( @@ -471,9 +471,9 @@ , get_str_list_logical_scalar(node, data_str='i%i_data[i]') , ['o%i_data[i]'%ipos for ipos, i in enumerate(node.outputs)] , sub=dict(fail='return;')) #TODO: set a failure code somehow!!! - print >> sio, " ", task_code - print >> sio, " }" - print >> sio, "}" + print(" ", task_code, file=sio) + print(" }", file=sio) + print("}", file=sio) #print sio.getvalue() return sio.getvalue() @@ -504,22 +504,22 @@ d = dict() #input_params and output_params go into the function declaration/definition input_params = ", ".join("const float * i%i_data, const int * i%i_str"%(ipos, ipos) - for ipos in xrange(len(node.inputs))) + for ipos in range(len(node.inputs))) output_params = ", ".join("float * o%i_data, const int * o%i_str"%(ipos, ipos) - for ipos in xrange(len(node.outputs))) + for ipos in range(len(node.outputs))) #input_args and output_args go into the recursive call. 
input_args = ", ".join("i%i_data, i%i_str"%(ipos, ipos) - for ipos in xrange(len(node.inputs))) + for ipos in range(len(node.inputs))) output_args = ", ".join("o%i_data, o%i_str"%(ipos, ipos) - for ipos in xrange(len(node.outputs))) - - prod_dims = '*'.join(["dims[%i]"%di for di in xrange(nd)]+['1']) + for ipos in range(len(node.outputs))) + + prod_dims = '*'.join(["dims[%i]"%di for di in range(nd)]+['1']) scalar_op=self.scalar_op.__class__.__name__ sio = StringIO() - print >> sio, """ + print(""" static void can_collapse_%(nodename)s(int nd, const int * dims, const int * strides, int collapse[]) { //can we collapse dims[i] and dims[i-1] @@ -529,84 +529,84 @@ }else collapse[i]=0; } } - """ %locals() - print >> sio, """ + """ %locals(), file=sio) + print(""" static int callkernel_%(nodename)s(unsigned int numEls, const int d, const int * dims, %(input_params)s, %(output_params)s) { numEls = %(prod_dims)s; - """ %locals() + """ %locals(), file=sio) if self.verbose: - print >> sio, """ + print(""" std::cerr << "calling kernel_%(scalar_op)s_%(nodename)s w numEls" << numEls << " dims"<< d << "\\n"; - """ %locals() - print >> sio, 'std::cerr << ' + " << ' ' << ".join(['" "']+list("dims[%i]"%di - for di in xrange(nd)) + ["'\\n';"]) + """ %locals(), file=sio) + print('std::cerr << ' + " << ' ' << ".join(['" "']+list("dims[%i]"%di + for di in range(nd)) + ["'\\n';"]), file=sio) if self.verbose>1: - for ipos in xrange(len(node.inputs)): - print >> sio, """ + for ipos in range(len(node.inputs)): + print(""" std::cerr << " %(ipos)s data strides" << """ %locals() + " << ' ' << ".join(["i%s_data"%ipos] - + list("i%s_str[%i]"%(ipos, di) for di in xrange(nd))) + ''' << "\\n"; ''' - - for ipos in xrange(len(node.outputs)): - print >> sio, """ + + list("i%s_str[%i]"%(ipos, di) for di in range(nd))) + ''' << "\\n"; ''', file=sio) + + for ipos in range(len(node.outputs)): + print(""" std::cerr << " %(ipos)s data strides" << """ %locals() + " << ' ' << ".join(["o%s_data"%ipos] - + list("o%s_str[%i]"%(ipos, di) for di in xrange(nd))) + ''' << "\\n"; ''' + + list("o%s_str[%i]"%(ipos, di) for di in range(nd))) + ''' << "\\n"; ''', file=sio) # collapse dimension that are broadcast in all inputs. # need to be done before contiguous collapse as it will break it. 
# do the dimensions and the strides if nd > 0: - print >> sio, "int local_dims[%(nd)s];" % locals() + print("int local_dims[%(nd)s];" % locals(), file=sio) else: - print >> sio, "int *local_dims=NULL;" + print("int *local_dims=NULL;", file=sio) if nb_inputs > 0 and nd > 0: - print >> sio, """ + print(""" int local_str[%(nb_inputs)s][%(nd)s]; int local_ostr[%(nb_outputs)s][%(nd)s]; - """ % locals() + """ % locals(), file=sio) else: - print >> sio, """ + print(""" int local_str[1][1]; int local_ostr[1][1]; - """ - print >> sio, """ + """, file=sio) + print(""" int nd_collapse = %(nd)s; for(int i=0;i<%(nd)s;i++){//init new dim local_dims[i]=dims[i]; } - """ % locals() - for ipos in xrange(len(node.inputs)): - print >> sio, """ + """ % locals(), file=sio) + for ipos in range(len(node.inputs)): + print(""" for(int i=0;i<%(nd)s;i++){//init new strides local_str[%(ipos)s][i]=i%(ipos)s_str[i]; } - """ % locals() - for ipos in xrange(len(node.outputs)): - print >> sio, """ + """ % locals(), file=sio) + for ipos in range(len(node.outputs)): + print(""" for(int i=0;i<%(nd)s;i++){//init new strides local_ostr[%(ipos)s][i]=o%(ipos)s_str[i]; } - """ % locals() + """ % locals(), file=sio) if self.verbose>2: - print >>sio, 'std::cerr <<"before broadcast collapse\\n";' - print >>sio, 'std::cerr<< "nd_collapse "<< nd_collapse << "\\n"; ' - print >> sio, 'std::cerr << "local_dims";' - for d in xrange(nd): - print >> sio, 'std::cerr << " " << local_dims[%(d)s]; ' % locals() - print >> sio, 'std::cerr << "\\n";' - - for ipos in xrange(len(node.inputs)): - print >> sio, 'std::cerr << " local_str inputs %(ipos)s: " <<'%locals() + \ - ' << " " << '.join(["local_str[%s][%s]"% (ipos, x) for x in xrange(nd)])+'<<"\\n";' - for ipos in xrange(len(node.outputs)): - print >> sio, 'std::cerr << " local_ostr inputs %(ipos)s: " <<'%locals() + \ - ' << " " << '.join(["local_ostr[%s][%s]"% (ipos, x) for x in xrange(nd)])+'<<"\\n";' - - print >> sio, """ + print('std::cerr <<"before broadcast collapse\\n";', file=sio) + print('std::cerr<< "nd_collapse "<< nd_collapse << "\\n"; ', file=sio) + print('std::cerr << "local_dims";', file=sio) + for d in range(nd): + print('std::cerr << " " << local_dims[%(d)s]; ' % locals(), file=sio) + print('std::cerr << "\\n";', file=sio) + + for ipos in range(len(node.inputs)): + print('std::cerr << " local_str inputs %(ipos)s: " <<'%locals() + \ + ' << " " << '.join(["local_str[%s][%s]"% (ipos, x) for x in range(nd)])+'<<"\\n";', file=sio) + for ipos in range(len(node.outputs)): + print('std::cerr << " local_ostr inputs %(ipos)s: " <<'%locals() + \ + ' << " " << '.join(["local_ostr[%s][%s]"% (ipos, x) for x in range(nd)])+'<<"\\n";', file=sio) + + print(""" for(int id=0;id2: - print >>sio, 'std::cerr <<"after broadcast collapse\\n";' - print >>sio, 'std::cerr<< "nd_collapse "<< nd_collapse << "\\n"; ' - print >> sio, 'std::cerr << "local_dims";' - for d in xrange(nd): - print >> sio, 'std::cerr << " " << local_dims[%(d)s]; '%locals() - print >> sio, 'std::cerr << "\\n";' - - for ipos in xrange(len(node.inputs)): - print >> sio, 'std::cerr << " local_str %(ipos)s: " <<'%locals()+' << " " << '.join(["local_str[%s][%s]"% (ipos, x) for x in xrange(nd)])+'<<"\\n";' - for ipos in xrange(len(node.outputs)): - print >> sio, 'std::cerr << " local_ostr %(ipos)s: " <<'%locals()+' << " " << '.join(["local_ostr[%s][%s]"% (ipos, x) for x in xrange(nd)])+'<<"\\n";' + print('std::cerr <<"after broadcast collapse\\n";', file=sio) + print('std::cerr<< "nd_collapse "<< nd_collapse << "\\n"; ', file=sio) + 
print('std::cerr << "local_dims";', file=sio) + for d in range(nd): + print('std::cerr << " " << local_dims[%(d)s]; '%locals(), file=sio) + print('std::cerr << "\\n";', file=sio) + + for ipos in range(len(node.inputs)): + print('std::cerr << " local_str %(ipos)s: " <<'%locals()+' << " " << '.join(["local_str[%s][%s]"% (ipos, x) for x in range(nd)])+'<<"\\n";', file=sio) + for ipos in range(len(node.outputs)): + print('std::cerr << " local_ostr %(ipos)s: " <<'%locals()+' << " " << '.join(["local_ostr[%s][%s]"% (ipos, x) for x in range(nd)])+'<<"\\n";', file=sio) # collapse contiguous dimensions (ignoring scalars, generic version(collapse any dimensions, right, left, middle)) # this is a good idea because we make less index calculation in the gpu. if nd > 0: - print >> sio, "int nd_collapse_[%(nd)s] = {"%locals() +','.join(['1' for x in xrange(nd)]) +"};" + print("int nd_collapse_[%(nd)s] = {"%locals() +','.join(['1' for x in range(nd)]) +"};", file=sio) else: - print >> sio, "int *nd_collapse_ = NULL;" - for ipos in xrange(len(node.inputs)): + print("int *nd_collapse_ = NULL;", file=sio) + for ipos in range(len(node.inputs)): if not _logical_scalar(node.inputs[ipos]): if nd > 0: - print >> sio, """ - int nd_collapse_%(ipos)s[%(nd)s] = {"""%locals() +','.join(['1' for x in xrange(nd)]) +"};" + print(""" + int nd_collapse_%(ipos)s[%(nd)s] = {"""%locals() +','.join(['1' for x in range(nd)]) +"};", file=sio) else: - print >> sio, """ - int *nd_collapse_%(ipos)s = NULL;"""%locals() - print >> sio, """ + print(""" + int *nd_collapse_%(ipos)s = NULL;"""%locals(), file=sio) + print(""" can_collapse_%(nodename)s(nd_collapse, local_dims, local_str[%(ipos)s], nd_collapse_%(ipos)s); for(int i=0;i1: - print >>sio, """ + print(""" std::cerr<< "nd_collapse_%(ipos)s "<< - """%locals() - print >>sio, ' << " " << '.join(["nd_collapse_%s[" % ipos +str(i)+"]" for i in xrange(nd)]) - print >>sio, '<< "\\n";' + """%locals(), file=sio) + print(' << " " << '.join(["nd_collapse_%s[" % ipos +str(i)+"]" for i in range(nd)]), file=sio) + print('<< "\\n";', file=sio) # update the local stride. - for ipos in xrange(len(node.inputs)): - print >> sio, """ + for ipos in range(len(node.inputs)): + print(""" for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[%(ipos)s][i-1]=local_str[%(ipos)s][i];//set new strides @@ -685,11 +685,11 @@ local_str[%(ipos)s][j-1]=local_str[%(ipos)s][j]; } } - """%locals() - - - for ipos in xrange(len(node.outputs)): - print >> sio, """ + """%locals(), file=sio) + + + for ipos in range(len(node.outputs)): + print(""" for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_ostr[%(ipos)s][i-1]=local_ostr[%(ipos)s][i];//set new strides @@ -697,10 +697,10 @@ local_ostr[%(ipos)s][j-1]=local_ostr[%(ipos)s][j]; } } - """%locals() + """%locals(), file=sio) # update the local dims. 
- print >> sio, """ + print(""" for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_dims[i-1]*=local_dims[i];//set new dims @@ -708,45 +708,45 @@ local_dims[j-1]=local_dims[j]; } } - """%locals() + """%locals(), file=sio) #update the new number of dim - print >> sio, """ + print(""" for(int i=1, end=nd_collapse;i0: - print >> sio," && "," && ".join(l) - print >> sio,"""){nd_collapse=0;} """ + print(" && "," && ".join(l), file=sio) + print("""){nd_collapse=0;} """, file=sio) if self.verbose: - print >> sio, 'std::cerr <<"after can_collapse\\n";' - print >> sio, """std::cerr << "nd_collapse " << nd_collapse << "\\n"; """ %locals() + print('std::cerr <<"after can_collapse\\n";', file=sio) + print("""std::cerr << "nd_collapse " << nd_collapse << "\\n"; """ %locals(), file=sio) if self.verbose>1: - for d in xrange(nd): - print >> sio, 'std::cerr << " " << local_dims[%(d)s]; '%locals() - print >> sio, 'std::cerr << "\\n";' - - for ipos in xrange(len(node.inputs)): - print >> sio, 'std::cerr << " local_str %(ipos)s: " <<'%locals()+' << " " << '.join(["local_str[%s][%s]"%(ipos, x) for x in xrange(nd)])+'<<"\\n";' - for ipos in xrange(len(node.outputs)): - print >> sio, 'std::cerr << " local_ostr %(ipos)s: " <<'%locals()+' << " " << '.join(["local_ostr[%s][%s]"%(ipos, x) for x in xrange(nd)])+'<<"\\n";' + for d in range(nd): + print('std::cerr << " " << local_dims[%(d)s]; '%locals(), file=sio) + print('std::cerr << "\\n";', file=sio) + + for ipos in range(len(node.inputs)): + print('std::cerr << " local_str %(ipos)s: " <<'%locals()+' << " " << '.join(["local_str[%s][%s]"%(ipos, x) for x in range(nd)])+'<<"\\n";', file=sio) + for ipos in range(len(node.outputs)): + print('std::cerr << " local_ostr %(ipos)s: " <<'%locals()+' << " " << '.join(["local_ostr[%s][%s]"%(ipos, x) for x in range(nd)])+'<<"\\n";', file=sio) def launch_Ccontiguous(nodename, scalar_op, sync=True): kernel_call_args = ["numEls"] - for ipos in xrange(len(node.inputs)): + for ipos in range(len(node.inputs)): kernel_call_args.append("i%i_data"%ipos) - for ipos in xrange(len(node.outputs)): + for ipos in range(len(node.outputs)): kernel_call_args.append("o%i_data"%ipos) kernel_call_args = ", ".join(kernel_call_args) verb="" if self.verbose: verb='std::cerr << " Running ccontiguous version\\n";' - print >> sio, """ + print(""" //first use at least a full warp int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE @@ -759,9 +759,9 @@ kernel_%(scalar_op)s_%(nodename)s_Ccontiguous<<>>(%(kernel_call_args)s); //std::cerr << "calling callkernel returned\\n"; - """ %locals() + """ %locals(), file=sio) if sync: - print >> sio, """ + print(""" CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) @@ -775,33 +775,33 @@ } %(verb)s return 0; - """ %locals() + """ %locals(), file=sio) else: - print >> sio, " return 0; " %locals() + print(" return 0; " %locals(), file=sio) def launch_General(nodename, scalar_op, force_nd, sync=True): # kernel_call_args are used to invoke the cuda kernel local="local_" kernel_call_args = ["numEls"] - kernel_call_args.extend(local+"dims[%i]"%di for di in xrange(force_nd)) - for ipos in xrange(len(node.inputs)): - kernel_call_args+=["i%i_data"%ipos] + list(local+"str[%i][%i]"%(ipos, di) for di in xrange(force_nd)) + kernel_call_args.extend(local+"dims[%i]"%di for di in range(force_nd)) + for ipos in range(len(node.inputs)): + kernel_call_args+=["i%i_data"%ipos] + list(local+"str[%i][%i]"%(ipos, di) for di in range(force_nd)) #strides = ", 
".join("i%i_str[%i]"%(ipos, di) for di in xrange(force_nd)) #kernel_call_args.append( "%s, i%i_data" % (strides, ipos)) - for ipos in xrange(len(node.outputs)): - kernel_call_args+=["o%i_data"%ipos] + list(local+"ostr[%i][%i]"%(ipos, di) for di in xrange(force_nd)) + for ipos in range(len(node.outputs)): + kernel_call_args+=["o%i_data"%ipos] + list(local+"ostr[%i][%i]"%(ipos, di) for di in range(force_nd)) #strides = ", ".join("o%i_str[%i]"%(ipos, di) for di in xrange(force_nd)) #kernel_call_args.append( "%s, o%i_data" % (strides, ipos)) if self.verbose: - print >> sio, """ + print(""" std::cerr << " Running general version with %(force_nd)s dims\\n"; - """%locals() - print >> sio, "std::cerr << "+ ' << " " << '.join(kernel_call_args)+' << "\\n";' + """%locals(), file=sio) + print("std::cerr << "+ ' << " " << '.join(kernel_call_args)+' << "\\n";', file=sio) #std::cerr << numEls << dims[0] << i0_data, i0_str[0] << o0_data, o0_str[0]\n; kernel_call_args = ", ".join(kernel_call_args) - print >> sio, """ + print(""" //first use at least a full warp int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE @@ -813,9 +813,9 @@ threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); kernel_%(scalar_op)s_%(nodename)s_%(force_nd)s<<>>(%(kernel_call_args)s); - """ %locals() + """ %locals(), file=sio) if sync: - print >> sio, """ + print(""" CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) @@ -828,22 +828,22 @@ } return 0; - """ %locals() + """ %locals(), file=sio) else: - print >> sio, " return 0; " %locals() - print >> sio, "if(numEls==0) return 0;" - print >> sio, "switch (nd_collapse==0?0:min(%(nd)s,nd_collapse)) {"%locals() - print >> sio, "case 0: {" + print(" return 0; " %locals(), file=sio) + print("if(numEls==0) return 0;", file=sio) + print("switch (nd_collapse==0?0:min(%(nd)s,nd_collapse)) {"%locals(), file=sio) + print("case 0: {", file=sio) launch_Ccontiguous(nodename, scalar_op, self.sync) - print >> sio, " } break;" - for i in xrange(1, nd+1): - print >> sio, "case "+str(i)+": {" + print(" } break;", file=sio) + for i in range(1, nd+1): + print("case "+str(i)+": {", file=sio) launch_General(nodename, scalar_op, i, self.sync) - print >> sio, " } break;" - - print >> sio, "}"#end case - print >> sio, "return -2;" # should not get to this point - print >> sio, "}"#end fct + print(" } break;", file=sio) + + print("}", file=sio)#end case + print("return -2;", file=sio) # should not get to this point + print("}", file=sio)#end fct #N.B. cudaGetLastError is called by c_code return sio.getvalue() @@ -855,7 +855,7 @@ #define INTMOD_POW2(a, b) (a & ((1<> sio, """ + print(""" //std::cerr << "C_CODE %(opname)s START\\n"; //standard elemwise size checks - """ % locals() + """ % locals(), file=sio) if nd > 0: - print >> sio, """ + print(""" int dims[%(nd)s] = {%(initial_dims)s}; - """ % locals() + """ % locals(), file=sio) else: - print >> sio, """ + print(""" int *dims = NULL; - """ + """, file=sio) #check that all inputs have valid dimensions emitted_inames = {} @@ -896,17 +896,17 @@ # with python 2.4 (at least), if a broadcastable pattern is made of # numpy.bool_ instead of bool, calling int() once is not enough. 
- broadcasts = map(int, map(int, node.inputs[id].broadcastable)) + broadcasts = list(map(int, list(map(int, node.inputs[id].broadcastable)))) broadcasts = ', '.join(map(str, broadcasts)) nd = node.inputs[id].ndim if nd > 0: - print >> sio, """ + print(""" int broadcasts_%(iname)s[%(nd)s] = {%(broadcasts)s}; - """ % locals() + """ % locals(), file=sio) else: - RefactoringTool: Refactored ./theano/sandbox/cuda/blas.py print >> sio, """ + print(""" int *broadcasts_%(iname)s = NULL; - """ % locals() + """ % locals(), file=sio) emitted_inames[iname] = node.inputs[id] #check that all inputs have valid dimensions @@ -914,7 +914,7 @@ for id, iname in enumerate(inputs): if iname in emitted_inames: continue - print >> sio, """ + print(""" //std::cerr << "C_CODE %(opname)s checking input %(iname)s\\n"; if (%(nd)s != %(iname)s->nd) { @@ -941,13 +941,13 @@ %(fail)s; } } - """ % locals() + """ % locals(), file=sio) emitted_inames[iname] = True #check that all outputs have valid dimensions for idx, oname in enumerate(outputs): - if idx not in self.inplace_pattern.keys(): - print >> sio, """ + if idx not in list(self.inplace_pattern.keys()): + print(""" for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) { if (dims[i] != CudaNdarray_HOST_DIMS(%(oname)s)[i]) { @@ -978,11 +978,11 @@ } //std::cerr << "ELEMWISE NEW %(oname)s nd" << %(oname)s->nd << "\\n"; //std::cerr << "ELEMWISE NEW %(oname)s data" << %(oname)s->devdata << "\\n"; - """ % locals() + """ % locals(), file=sio) else: input_idx = self.inplace_pattern[idx] iname = inputs[input_idx] - print >> sio, """ + print(""" Py_XDECREF(%(oname)s); %(oname)s = %(iname)s; Py_INCREF(%(oname)s); @@ -1005,33 +1005,33 @@ } //std::cerr << "ELEMWISE NEW %(oname)s nd" << %(oname)s->nd << "\\n"; //std::cerr << "ELEMWISE NEW %(oname)s data" << %(oname)s->devdata << "\\n"; - """ % locals() - - print >> sio, """ + """ % locals(), file=sio) + + print(""" { //new block so that failure gotos don't skip over variable initialization //std::cerr << "calling callkernel\\n"; if (callkernel_%(nodename)s(1, 0, dims - """ % locals() + """ % locals(), file=sio) for iname in inputs: - print >> sio, """ + print(""" , CudaNdarray_DEV_DATA(%(iname)s), CudaNdarray_HOST_STRIDES(%(iname)s) - """ % locals() + """ % locals(), file=sio) for oname in outputs: - print >> sio, """ + print(""" , CudaNdarray_DEV_DATA(%(oname)s), CudaNdarray_HOST_STRIDES(%(oname)s) - """ % locals() - print >> sio, """ + """ % locals(), file=sio) + print(""" )) { // error - """ + """, file=sio) for oname in outputs: - print >> sio, """ + print(""" Py_DECREF(%(oname)s); %(oname)s = NULL; - """ % locals() - print >> sio, """ + """ % locals(), file=sio) + print(""" %(fail)s; } else // no error @@ -1039,7 +1039,7 @@ } } //std::cerr << "C_CODE %(opname)s END\\n"; - """ % locals() + """ % locals(), file=sio) #print sio.getvalue() return sio.getvalue() --- ./theano/sandbox/cuda/blas.py (original) +++ ./theano/sandbox/cuda/blas.py (refactored) @@ -7,6 +7,7 @@ from theano.compat.six import StringIO from theano.sandbox.cuda.type import CudaNdarrayType from theano.sandbox.cuda import GpuOp +from functools import reduce class GpuDot22(GpuOp): @@ -228,7 +229,7 @@ fail = sub['fail'] sio = StringIO() - print >> sio, """ + print(""" #define REAL float float %(name)s_a = (PyArray_TYPE(%(a)s) == NPY_FLOAT) @@ -288,7 +289,7 @@ { RefactoringTool: Refactored ./theano/sandbox/cuda/basic_ops.py %(fail)s; } - """ + """, file=sio) return sio.getvalue() % locals() gpu_gemm_no_inplace = GpuGemm(inplace=False) @@ -343,7 +344,7 @@ fail = 
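The doubled wrapper in the broadcasts line above, list(map(int, list(map(int, ...)))), is what the map fixer produces mechanically: map() returns a one-shot iterator on Python 3, so each call gets a list() around it even though the outer map could consume the inner iterator directly. The inner list() is redundant but harmless; a hedged sketch with a stand-in broadcastable pattern:

    import numpy

    broadcastable = (numpy.bool_(True), numpy.bool_(False), numpy.bool_(True))  # stand-in values

    # Python 2: map() returned a list; Python 3: a lazy iterator, hence the list() wrappers.
    broadcasts = list(map(int, map(int, broadcastable)))     # inner list() not actually needed
    assert broadcasts == [1, 0, 1]
    print(', '.join(map(str, broadcasts)))                   # fine to leave lazy when only joined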
sub['fail'] sio = StringIO() - print >> sio, """ + print(""" float %(name)s_alpha = ((dtype_%(a)s*)(%(a)s->data))[0]; float %(name)s_beta = ((dtype_%(b)s*)(%(b)s->data))[0]; @@ -386,7 +387,7 @@ { %(fail)s; } - """ + """, file=sio) return sio.getvalue() % locals() gpu_gemv_no_inplace = GpuGemv(inplace=False) gpu_gemv_inplace = GpuGemv(inplace=True) @@ -440,7 +441,7 @@ fail = sub['fail'] sio = StringIO() - print >> sio, """ + print(""" float %(name)s_alpha = ((dtype_%(a)s*)(%(a)s->data))[0]; if (%(inplace)s @@ -491,7 +492,7 @@ { %(fail)s; } - """ + """, file=sio) return sio.getvalue() % locals() gpu_ger_no_inplace = GpuGer(inplace=False) gpu_ger_inplace = GpuGer(inplace=True) --- ./theano/sandbox/cuda/basic_ops.py (original) +++ ./theano/sandbox/cuda/basic_ops.py (refactored) @@ -189,7 +189,7 @@ self.scalar_op = scalar_op self.inplace_pattern = inplace_pattern - self.destroy_map = dict((o, [i]) for o, i in inplace_pattern.items()) + self.destroy_map = dict((o, [i]) for o, i in list(inplace_pattern.items())) self.sync = sync @@ -217,7 +217,7 @@ self.sync == other.sync) def _rehash(self): - items = self.inplace_pattern.items() + items = list(self.inplace_pattern.items()) items.sort() tuple_items = [k for k, v in items] for k, v in items: @@ -237,7 +237,7 @@ def __str__(self): if self.inplace_pattern: - items = self.inplace_pattern.items() + items = list(self.inplace_pattern.items()) items.sort() # We need to print the scalar_op, not only the its class name # to have the full definition of composite op. @@ -258,7 +258,7 @@ # output is broadcastable only along dimensions where all # inputs are broadcastable broadcastable = [] - for d in xrange(_inputs[0].type.ndim): + for d in range(_inputs[0].type.ndim): bcast_d = True for i in _inputs: if not i.type.broadcastable[d]: @@ -269,7 +269,7 @@ otype = CudaNdarrayType(broadcastable=broadcastable) assert self.nout > 0 - return Apply(self, _inputs, [otype() for o in xrange(self.nout)]) + return Apply(self, _inputs, [otype() for o in range(self.nout)]) def c_support_code(self, *args, **kwargs): return self.src_generator.c_support_code(*args, **kwargs) @@ -389,17 +389,17 @@ fail = sub['fail'] #check input - print >> sio, """ + print(""" if (%(input)s->nd != %(nd_in)s) { PyErr_Format(PyExc_TypeError, "required nd=%(nd_in)s, got nd=%%i", %(input)s->nd); %(fail)s; } - """ % locals() + """ % locals(), file=sio) #alloc an output - print >> sio, """ + print(""" if (%(res)s && (%(res)s->nd == %(nd_out)s)) { //re-use previously-allocated cnda @@ -424,9 +424,9 @@ } } } - """ % locals() - - print >> sio, """ + """ % locals(), file=sio) + + print(""" if (CudaNdarray_set_device_data(%(res)s, CudaNdarray_DEV_DATA(%(input)s), %(input)s)) @@ -436,7 +436,7 @@ %(res)s = NULL; %(fail)s; } - """ % locals() + """ % locals(), file=sio) #reassign the dimension and strides in the host pointers for i, o in enumerate(self.new_order): @@ -445,26 +445,26 @@ # the correct thing to do is to insert a run-time check # that the size in this dimension is 1 assert node.outputs[0].type.broadcastable[i] - print >> sio, """ + print(""" CudaNdarray_set_dim(%(res)s, %(i)s, 1); CudaNdarray_set_stride(%(res)s, %(i)s, 0); - """ % locals() + """ % locals(), file=sio) else: - print >> sio, """ + print(""" CudaNdarray_set_dim(%(res)s, %(i)s, CudaNdarray_HOST_DIMS(%(input)s)[%(o)s]); CudaNdarray_set_stride(%(res)s, %(i)s, CudaNdarray_HOST_STRIDES(%(input)s)[%(o)s]); - """ % locals() + """ % locals(), file=sio) for i, o in enumerate(self.new_order): - print >> sio, """ + print(""" //std::cerr << 
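The basic_ops.py hunk that starts above shows the dict-view fixers: items(), keys() and values() return views on Python 3, so code that sorted or mutated the result gets a list() wrapper (as in _rehash and __str__), while plain iteration such as the destroy_map comprehension can read the view directly. A small sketch with a hypothetical inplace_pattern:

    inplace_pattern = {1: 0, 0: 2}                     # hypothetical output -> input mapping

    # What 2to3 emits: copy the view into a list, then sort in place.
    items = list(inplace_pattern.items())
    items.sort()
    # Equivalent, slightly more idiomatic, single step:
    assert items == sorted(inplace_pattern.items()) == [(0, 2), (1, 0)]

    # Iteration needs no copy; the view is consumed directly.
    destroy_map = dict((o, [i]) for o, i in inplace_pattern.items())
    print(destroy_map)                                  # {1: [0], 0: [2]}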
"GpuDimShuffle " << %(res)s << " str[%(i)s] = " << %(res)s->str[%(i)s] << "\\n"; - """ % locals() + """ % locals(), file=sio) # copy the host dims and stride -> device if 0: - print >> sio, """ + print(""" if (CudaNdarray_copy_structure_to_device(%(res)s)) { //err msg set @@ -472,21 +472,21 @@ %(res)s = NULL; %(fail)s; } - """ % locals() + """ % locals(), file=sio) if 0: # print full code to stdout - print '--------------------------------------' - print 'C_CODE' - print '' - print self - print "IN BROAD", self.input_broadcastable - print "NEW ORDER", self.new_order - print "SHUFFLE", self.shuffle - print "AUGMENT", self.augment - print '------------' - print '' - print sio.getvalue() - print '--------------------------------------' + print('--------------------------------------') + print('C_CODE') + print('') + print(self) + print("IN BROAD", self.input_broadcastable) + print("NEW ORDER", self.new_order) + print("SHUFFLE", self.shuffle) + print("AUGMENT", self.augment) + print('------------') + print('') + print(sio.getvalue()) + print('--------------------------------------') if 0: sys.exit() @@ -557,7 +557,7 @@ if (x.type.ndim != len(self.reduce_mask)): raise TypeError("x must have rank %i" % len(self.reduce_mask)) o_broadcast = [x.type.broadcastable[i] for i - in xrange(x.type.ndim) if not self.reduce_mask[i]] + in range(x.type.ndim) if not self.reduce_mask[i]] return Apply(self, [x], [CudaNdarrayType(o_broadcast)()]) """ @@ -597,8 +597,8 @@ name = 'fake_name' - inp = ['fake_input_name_%d' % i for i in xrange(len(inputs))] - out = ['fake_output_name_%d' % i for i in xrange(len(node.outputs))] + inp = ['fake_input_name_%d' % i for i in range(len(inputs))] + out = ['fake_output_name_%d' % i for i in range(len(node.outputs))] sub = {'fail': 'fake failure code'} @@ -622,65 +622,65 @@ fail = sub['fail'] #check input - print >> sio, """ + print(""" if (%(x)s->nd != %(nd_in)s) { PyErr_Format(PyExc_TypeError, "required nd=%(nd_in)s, got nd=%%i", %(x)s->nd); %(fail)s; } - """ % locals() + """ % locals(), file=sio) # It might be nice to use a property of the op class to do this, # but tensor.elemwise.CAReduce has this exact same check so I guess # this is OK to do if self.scalar_op in [scal.minimum, scal.maximum]: conds = ["(CudaNdarray_HOST_DIMS(%s)[%d] == 0)" % (x, i) - for i in xrange(nd_in) + for i in range(nd_in) if self.reduce_mask[i]] assert len(conds) > 0 cond = "(" + " || ".join(conds) + ")" - print >> sio, """ + print(""" if %(cond)s { PyErr_Format(PyExc_ValueError," tried to reduce a 0-length axis."); %(fail)s; } - """ %locals() + """ %locals(), file=sio) # # alloc an output if we need one # # check the basics of out output - print >> sio, """ + print(""" if ( !%(z)s || (%(z)s->nd != %(nd_out)s) - """ % locals() + """ % locals(), file=sio) #ensure that the output has the right non-reduced dimensions j = 0 - for i in xrange(nd_in): + for i in range(nd_in): if not self.reduce_mask[i]: - print >> sio, " || (CudaNdarray_HOST_DIMS(%(z)s)[%(j)s] != CudaNdarray_HOST_DIMS(%(x)s)[%(i)d]) " % locals() + print(" || (CudaNdarray_HOST_DIMS(%(z)s)[%(j)s] != CudaNdarray_HOST_DIMS(%(x)s)[%(i)d]) " % locals(), file=sio) j += 1 - print >> sio, """ + print(""" ) { - """ % locals() + """ % locals(), file=sio) if nd_out > 0: - print >> sio, "int new_dims[%(nd_out)s]; " % locals() + print("int new_dims[%(nd_out)s]; " % locals(), file=sio) else: - print >> sio, "int *new_dims=NULL; " + print("int *new_dims=NULL; ", file=sio) j = 0 - for i in xrange(nd_in): + for i in range(nd_in): if not 
self.reduce_mask[i]: - print >> sio, 'new_dims[%(j)s] = CudaNdarray_HOST_DIMS(%(x)s)[%(i)s];' % locals() + print('new_dims[%(j)s] = CudaNdarray_HOST_DIMS(%(x)s)[%(i)s];' % locals(), file=sio) j += 1 - print >> sio, """ + print(""" Py_XDECREF(%(z)s); %(z)s = (CudaNdarray*) CudaNdarray_NewDims(%(nd_out)s, new_dims); if (NULL == %(z)s) @@ -689,7 +689,7 @@ %(fail)s; } } - """ % locals() + """ % locals(), file=sio) # \begin bracket the reduction in a check that there is # actually work to do @@ -702,13 +702,13 @@ "GpuCAReduce not implemented when input shape is 0 for this scalar_op"); %(fail)s; """ % locals() - print >> sio, """ + print(""" if (CudaNdarray_SIZE(%(z)s) && ! CudaNdarray_SIZE(%(x)s)){ %(zero_shp)s; } else if (CudaNdarray_SIZE(%(z)s)) { - """ % locals() + """ % locals(), file=sio) # # Now perform the reduction @@ -718,20 +718,20 @@ #check if the tensor is ccontiguous, if true, use the c_code_reduce_ccontig code. #TODO: check if we are ccontiguous when we un-dimshuffle #TODO: if only some dims are ccontiguous, call version with less dims. - print >> sio, 'if(CudaNdarray_is_c_contiguous(%(x)s)){'%locals() + print('if(CudaNdarray_is_c_contiguous(%(x)s)){'%locals(), file=sio) self.c_code_reduce_ccontig(sio, node, name, x, z, fail) - print >> sio, "}else{" + print("}else{", file=sio) getattr(self, 'c_code_reduce_%s'%(''.join( str(i) for i in self.reduce_mask)))(sio, node, name, x, z, fail) - print >> sio, "}" + print("}", file=sio) else: getattr(self, 'c_code_reduce_%s'%(''.join( str(i) for i in self.reduce_mask)))(sio, node, name, x, z, fail) # \end bracket the reduction ... - print >> sio, """ - } - """ % locals() + print(""" + } + """ % locals(), file=sio) return sio.getvalue() @@ -771,7 +771,7 @@ shapes_data = ",".join(["CudaNdarray_HOST_DIMS(%s)[%d]" % (x, i) for i in range(node.inputs[0].ndim)]) - print >> sio, """ + print(""" if (verbose) printf("running kernel_reduce_%(pattern)s_%(name)s\\n"); int n_shared = sizeof(float) * n_threads.x * n_threads.y * n_threads.z; @@ -784,27 +784,27 @@ n_blocks.x,n_blocks.y, n_blocks.x*n_blocks.y, n_shared, %(shapes_data)s); kernel_reduce_%(pattern)s_%(name)s<<>>( - """ % locals() - for i in xrange(ndim): - print >> sio, """ + """ % locals(), file=sio) + for i in range(ndim): + print(""" CudaNdarray_HOST_DIMS(%(x)s)[%(i)s], - """ % locals() - print >> sio, """ + """ % locals(), file=sio) + print(""" CudaNdarray_DEV_DATA(%(x)s) - """ % locals() - for i in xrange(ndim): - print >> sio, """ + """ % locals(), file=sio) + for i in range(ndim): + print(""" ,CudaNdarray_HOST_STRIDES(%(x)s)[%(i)s] - """ % locals() - print >> sio, """ + """ % locals(), file=sio) + print(""" ,CudaNdarray_DEV_DATA(%(z)s) - """ % locals() - for i in xrange(nd_out): - print >> sio, """ + """ % locals(), file=sio) + for i in range(nd_out): + print(""" ,CudaNdarray_HOST_STRIDES(%(z)s)[%(i)s] - """ % locals() - - print >> sio, """ + """ % locals(), file=sio) + + print(""" ); CNDA_THREAD_SYNC; cudaError_t sts = cudaGetLastError(); @@ -824,7 +824,7 @@ %(shapes_data)s); %(fail)s; } - """ % locals() + """ % locals(), file=sio) return sio.getvalue() def _k_decl(self, node, nodename, pattern=None, @@ -858,28 +858,28 @@ pattern = ''.join(str(i) for i in reduce_mask) sio = StringIO() - print >> sio, """ + print(""" static __global__ void kernel_reduce_%(pattern)s_%(nodename)s( - """ % locals() - for i in xrange(ndim): - print >> sio, """ + """ % locals(), file=sio) + for i in range(ndim): + print(""" const int d%(i)s, - """ % locals() - print >> sio, """ + """ % locals(), file=sio) + 
print(""" const float *A, - """ % locals() - for i in xrange(ndim): - print >> sio, """ + """ % locals(), file=sio) + for i in range(ndim): + print(""" const int sA%(i)s, - """ % locals() - print >> sio, """ + """ % locals(), file=sio) + print(""" float * Z - """ % locals() - for i in xrange(ndim - sum(reduce_mask)): - print >> sio, """ + """ % locals(), file=sio) + for i in range(ndim - sum(reduce_mask)): + print(""" , const int sZ%(i)s - """ % locals() - print >> sio, ")" + """ % locals(), file=sio) + print(")", file=sio) return sio.getvalue() def _k_init(self, *args): @@ -1097,7 +1097,7 @@ %(fail)s; """ % locals() - print >> sio, """ + print(""" { if(CudaNdarray_SIZE(%(x)s)==0){ %(zero_shp)s; @@ -1133,11 +1133,11 @@ } } } - """ % locals() + """ % locals(), file=sio) def c_code_reduce_1(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) - print >> sio, """ + print(""" { int verbose = 0; dim3 n_threads( @@ -1146,11 +1146,11 @@ dim3 n_blocks(1); %(makecall)s } - """ % locals() + """ % locals(), file=sio) def c_code_reduce_11(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) - print >> sio, """ + print(""" { int verbose = 0; dim3 n_threads( @@ -1164,7 +1164,7 @@ dim3 n_blocks(1); %(makecall)s } - """ % locals() + """ % locals(), file=sio) def c_code_reduce_01X(self, sio, node, name, x, z, fail, N): """ @@ -1176,9 +1176,9 @@ makecall = self._makecall(node, name, x, z, fail) N_pattern = ''.join(['1'] * N) param_dim = ",".join(["CudaNdarray_HOST_DIMS(%s)[%d]" % (x, i) - for i in xrange(N + 1)]) + for i in range(N + 1)]) strides_dim = ",".join(["CudaNdarray_HOST_STRIDES(%s)[%d]" - % (x, i) for i in xrange(N + 1)]) + % (x, i) for i in range(N + 1)]) threads_y = """ //get as many y threads as we can fit @@ -1207,7 +1207,7 @@ if len(self.reduce_mask) == 3: threads_z = '' - print >> sio, """ + print(""" { int verbose = 0; dim3 n_threads( @@ -1219,7 +1219,7 @@ NUM_VECTOR_OP_BLOCKS)); %(makecall)s } - """ % locals() + """ % locals(), file=sio) def c_code_reduce_01(self, sio, node, name, x, z, fail): self.c_code_reduce_01X(sio, node, name, x, z, fail, 1) @@ -1231,7 +1231,7 @@ self.c_code_reduce_01X(sio, node, name, x, z, fail, 3) def c_code_reduce_10(self, sio, node, name, x, z, fail): - print >> sio, """ + print(""" { int verbose = 0; dim3 n_threads( @@ -1277,14 +1277,14 @@ %(fail)s; } } - """ % locals() + """ % locals(), file=sio) def c_code_reduce_010(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) makecall_inner = self._makecall(node, name, x, z, fail, pattern="010_inner") pattern = ''.join(str(i) for i in self.reduce_mask) - print >> sio, """ + print(""" { //int n_summations = CudaNdarray_HOST_DIMS(%(x)s)[0] * CudaNdarray_HOST_DIMS(%(x)s)[2]; @@ -1399,11 +1399,11 @@ } } } - """ % locals() + """ % locals(), file=sio) def c_code_reduce_0101(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) - print >> sio, """ + print(""" { int verbose = 0; dim3 n_threads( @@ -1418,14 +1418,14 @@ dim3 n_blocks(CudaNdarray_HOST_DIMS(%(x)s)[0], CudaNdarray_HOST_DIMS(%(x)s)[2]); %(makecall)s } - """ % locals() + """ % locals(), file=sio) def c_code_reduce_100(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) # use threadIdx.x for i0 # use blockIdx.x for i1 # use blockIdx.y for i2 - print >> sio, """ + print(""" { int verbose = 0; dim3 n_threads( @@ -1438,11 +1438,11 @@ } %(makecall)s } - """ % locals() + """ % locals(), file=sio) def 
c_code_reduce_110(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) - print >> sio, """ + print(""" { int verbose = 0; dim3 n_threads( @@ -1459,11 +1459,11 @@ dim3 n_blocks(CudaNdarray_HOST_DIMS(%(x)s)[2]); %(makecall)s } - """ % locals() + """ % locals(), file=sio) def c_code_reduce_001(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) - print >> sio, """ + print(""" { int verbose = 0; dim3 n_threads( @@ -1481,11 +1481,11 @@ n_blocks.y -= 1; %(makecall)s } - """ % locals() + """ % locals(), file=sio) def c_code_reduce_111(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) - print >> sio, """ + print(""" { int verbose = 0; dim3 n_threads( @@ -1513,11 +1513,11 @@ dim3 n_blocks(1,1,1); %(makecall)s } - """ % locals() + """ % locals(), file=sio) def c_code_reduce_0011(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) - print >> sio, """ + print(""" { int verbose = 0; @@ -1543,11 +1543,11 @@ %(makecall)s } - """ % locals() + """ % locals(), file=sio) def c_code_reduce_1111(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) - print >> sio, """ + print(""" { int verbose = 0; dim3 n_threads( @@ -1578,11 +1578,11 @@ dim3 n_blocks(1,1,1); %(makecall)s } - """ % locals() + """ % locals(), file=sio) def c_code_reduce_1011(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) - print >> sio, """ + print(""" { int verbose = 0; dim3 n_threads( @@ -1602,7 +1602,7 @@ dim3 n_blocks(CudaNdarray_HOST_DIMS(%(x)s)[1]); %(makecall)s } - """ % locals() + """ % locals(), file=sio) def c_code_cache_version_apply(self, node): version = [8] # the version corresponding to the c code in this Op @@ -1630,7 +1630,7 @@ "A[i0]", {}) reduce_init = self._assign_init("A[0]") - print >> sio, """ + print(""" static __global__ void kernel_reduce_ccontig_%(nodename)s( const unsigned int d0, const float *A, @@ -1652,7 +1652,7 @@ } %(reducebuf)s } - """ % locals() + """ % locals(), file=sio) if self.reduce_mask == (1,): #this kernel is ok for up to a few thousand elements, but # it only runs on ONE multiprocessor @@ -1661,7 +1661,7 @@ "A[i0 * sA0]", {}) reduce_init = self._assign_init("A[0]") - print >> sio, """ + print(""" static __global__ void kernel_reduce_1_%(nodename)s( const unsigned int d0, const float *A, const int sA0, @@ -1683,7 +1683,7 @@ } %(reducebuf)s } - """ % locals() + """ % locals(), file=sio) if self.reduce_mask == (1, 1): #this kernel is ok for up to a few thousand elements, but # it only runs on ONE multiprocessor @@ -1693,7 +1693,7 @@ {}) reduce_init = self._assign_init("A[0]") - print >> sio, """ + print(""" static __global__ void kernel_reduce_11_%(nodename)s( const int d0, const int d1, @@ -1719,7 +1719,7 @@ } %(reducebuf)s } - """ % locals() + """ % locals(), file=sio) #01, 011, 0111 if (0 == self.reduce_mask[0] and all(self.reduce_mask[1:]) and @@ -1765,9 +1765,9 @@ reducebuf = self._k_reduce_buf('Z[i0 * sZ0]', node, nodename, sub={}) param_dim = ",".join(["const int d%d" % i - for i in xrange(nd_in)]) + for i in range(nd_in)]) param_strides = ",".join(["const int sA%d" % i - for i in xrange(nd_in)]) + for i in range(nd_in)]) decl = self._k_decl(node, nodename) init = self._k_init(node, nodename) reduce_init = self._assign_init("A[%(first_i3)s * %(sA3)s + %(first_i2)s * %(sA2)s + %(first_i1)s * %(sA1)s + i0 * sA0]" % locals()) @@ -1775,7 +1775,7 @@ node, nodename, "myresult", "A[i3 * sA3 + i2 * 
sA2 + i1 * sA1 + i0 * sA0]", {}) - print >> sio, """ + print(""" %(decl)s{ %(init)s for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x){ @@ -1790,7 +1790,7 @@ %(reducebuf)s } } - """ % locals() + """ % locals(), file=sio) if self.reduce_mask == (0, 1, 0) or self.reduce_mask == (1, 0): # this kernel uses one block for each column, # threads per block for each element per column. @@ -1804,7 +1804,7 @@ "A[i0 * sA0 + i1 * sA1 + i2 * sA2]", {}) reduce_init = self._assign_init("A[i0 * sA0 + threadIdx.x * sA1 + i2 * sA2]") - print >> sio, """ + print(""" static __global__ void kernel_reduce_010_%(nodename)s( const int d0, const int d1, @@ -1837,13 +1837,13 @@ } } - """ % locals() + """ % locals(), file=sio) if self.reduce_mask == (0, 1, 0): reduce_fct = self._assign_reduce(node, nodename, "myresult", "X[a * sX0 + b * sX1 + c * sX2]", {}) reduce_init = self._assign_init("X[a * sX0 + 0 * sX1 + c * sX2]") - print >> sio, """ + print(""" static __global__ void kernel_reduce_010_AD_%(nodename)s( const int A, const int B, @@ -1881,7 +1881,7 @@ } } - """ % locals() + """ % locals(), file=sio) if self.reduce_mask == (0, 1, 0): # # This kernel is optimized when the inner most dimensions @@ -1903,7 +1903,7 @@ "A[i0 * sA0 + i1 * sA1 + i2 * sA2]", {}) reduce_init = self._assign_init("A[i0 * sA0 + 0 * sA1 + i2 * sA2]") - print >> sio, """ + print(""" %(decl)s { if(warpSize> sio, """ + print(""" static __global__ void kernel_reduce_110_%(nodename)s( const int d0, const int d1, @@ -1970,7 +1970,7 @@ %(reducebuf)s } - """ % locals() + """ % locals(), file=sio) if self.reduce_mask == (1, 0, 0): reducebuf = self._k_reduce_buf('Z[i1 * sZ0 + i2 * sZ1]', node, nodename, sub={}) @@ -1980,7 +1980,7 @@ "A[i0 * sA0 + i1 * sA1 + i2 * sA2]", {}) reduce_init = self._assign_init("A[i1 * sA1 + i2 * sA2]") - print >> sio, """ + print(""" %(decl)s { %(init)s @@ -1997,7 +1997,7 @@ } } } - """ % locals() + """ % locals(), file=sio) if self.reduce_mask == (1, 1, 1): reducebuf = self._k_reduce_buf('Z[0]', node, nodename, sub={}) @@ -2007,7 +2007,7 @@ "A[i0 * sA0 + i1 * sA1 + i2 * sA2]", {}) reduce_init = self._assign_init("A[0]") - print >> sio, """ + print(""" %(decl)s { %(init)s @@ -2024,7 +2024,7 @@ } %(reducebuf)s } - """ % locals() + """ % locals(), file=sio) if self.reduce_mask == (0, 0, 1): # this kernel uses one block for each row, # threads per block for each element per row. @@ -2034,7 +2034,7 @@ "A[i0 * sA0 + i1 * sA1 + i2 * sA2]", {}) reduce_init = self._assign_init("A[i0 * sA0 + i1 * sA1]") - print >> sio, """ + print(""" static __global__ void kernel_reduce_001_%(nodename)s( const int d0, const int d1, @@ -2065,7 +2065,7 @@ } } } - """ % locals() + """ % locals(), file=sio) if self.reduce_mask == (0, 0, 1, 1): # this kernel uses one block for each row, # threads per block for each element per row. @@ -2077,7 +2077,7 @@ "A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3]", {}) reduce_init = self._assign_init("A[i0 * sA0 + i1 * sA1]") - print >> sio, """ + print(""" %(decl)s { %(init)s @@ -2098,7 +2098,7 @@ } } } - """ % locals() + """ % locals(), file=sio) if self.reduce_mask == (0, 1, 0, 1): # this kernel uses one block for each row, # threads per block for each element per row. 
@@ -2110,7 +2110,7 @@ "A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3]", {}) reduce_init = self._assign_init("A[i0 * sA0 + i2 * sA2]") - print >> sio, """ + print(""" %(decl)s { %(init)s @@ -2131,7 +2131,7 @@ } } } - """ % locals() + """ % locals(), file=sio) if self.reduce_mask == (1, 1, 1, 1): reducebuf = self._k_reduce_buf('Z[0]', node, nodename, sub={}) @@ -2141,7 +2141,7 @@ "A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3]", {}) reduce_init = self._assign_init("A[0]") - print >> sio, """ + print(""" %(decl)s { %(init)s @@ -2159,7 +2159,7 @@ } %(reducebuf)s } - """ % locals() + """ % locals(), file=sio) if self.reduce_mask == (1, 0, 1, 1): reducebuf = self._k_reduce_buf('Z[blockIdx.x*sZ0]', node, nodename, sub={}) @@ -2167,7 +2167,7 @@ "A[i0 * sA0 + blockIdx.x * sA1 + i2 * sA2 + i3 * sA3]", {}) reduce_init = self._assign_init("A[blockIdx.x * sA1]") - print >> sio, """ + print(""" static __global__ void kernel_reduce_1011_%(nodename)s( const unsigned int d0, const unsigned int d1, @@ -2199,7 +2199,7 @@ } %(reducebuf)s } - """ % locals() + """ % locals(), file=sio) return sio.getvalue() @@ -3205,7 +3205,7 @@ # If the output is a constant, it will have to be deepcopied # each time the function is called. So we do not fold. return False - elif (not isinstance(client[0], basestring) + elif (not isinstance(client[0], str) and isinstance(client[0].op, ( tensor.IncSubtensor, tensor.AdvancedIncSubtensor1, @@ -3258,7 +3258,7 @@ Py_INCREF(%(z)s); } else if ((NULL == %(z)s)""" % locals() - for i in xrange(len(node.inputs[0].type.broadcastable)): + for i in range(len(node.inputs[0].type.broadcastable)): str += "\n|| (CudaNdarray_HOST_DIMS(%(input)s)[%(i)s] != CudaNdarray_HOST_DIMS(%(z)s)[%(i)s])" % locals() str += """ || !CudaNdarray_is_c_contiguous(%(z)s)) @@ -3374,15 +3374,15 @@ apply_time, apply_cimpl, message, outputs_size, other_time): if any([x[1].op.__class__.__name__.lower().startswith("gpu") - for x in apply_time.keys()]): + for x in list(apply_time.keys())]): local_time = sum(apply_time.values()) - print - print 'Some info useful for gpu:' + print() + print('Some info useful for gpu:') cpu = 0 gpu = 0 trans = 0 - for (_, node), t in apply_time.items(): + for (_, node), t in list(apply_time.items()): if isinstance(node.op.__class__.__name__, (HostFromGpu, GpuFromHost)): trans += t @@ -3390,35 +3390,35 @@ gpu += t else: cpu += t - print - print " Spent %.3fs(%.3f%%) in cpu Op, %.3fs(%.3f%%) in gpu Op and %.3fs(%.3f%%) transfert Op" % ( + print() + print(" Spent %.3fs(%.3f%%) in cpu Op, %.3fs(%.3f%%) in gpu Op and %.3fs(%.3f%%) transfert Op" % ( cpu, cpu / local_time * 100, gpu, gpu / local_time * 100, - trans, trans / local_time * 100) - - print - print " Theano function input that are float64" - print " " - for fct in fct_call.keys(): + trans, trans / local_time * 100)) + + print() + print(" Theano function input that are float64") + print(" ") + for fct in list(fct_call.keys()): for i in fct.input_storage: if hasattr(i.type, 'dtype') and i.type.dtype == 'float64': - print ' ', fct.name, i.name, i.type, i - - print - print " List of apply that don't have float64 as input but have float64 in outputs" - print " (Useful to know if we forgot some cast when using floatX=float32 or gpu code)" - print ' ' - for fct in fct_call.keys(): + print(' ', fct.name, i.name, i.type, i) + + print() + print(" List of apply that don't have float64 as input but have float64 in outputs") + print(" (Useful to know if we forgot some cast when using floatX=float32 or gpu code)") + print(' ') + for fct in 
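The basestring change visible above (isinstance(client[0], basestring) becoming isinstance(client[0], str)) is the fixer for Python 3's single text type: str and unicode merge into str, and code that also needs to accept raw bytes has to say so explicitly. A tiny sketch, names illustrative:

    def is_variable_name(client):
        # Python 2: isinstance(client, basestring) covered str and unicode.
        # Python 3: str is the only text type; add bytes explicitly if needed.
        return isinstance(client, (str, bytes))

    assert is_variable_name('output_0')
    assert not is_variable_name(('fake_node', 0))
    print(is_variable_name('output_0'))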
list(fct_call.keys()): for idx, node in enumerate(fct.maker.fgraph.toposort()): RefactoringTool: Refactored ./theano/sandbox/cuda/__init__.py if (any(hasattr(i, 'dtype') and i.dtype == 'float64' for i in node.outputs) and not any(hasattr(i, 'dtype') and i.dtype == 'float64' for i in node.inputs)): - print ' ', str(node), idx, fct.name, - print str([getattr(i, 'dtype', None) - for i in node.inputs]), - print str([getattr(i, 'dtype', None) - for i in node.outputs]) + print(' ', str(node), idx, fct.name, end=' ') + print(str([getattr(i, 'dtype', None) + for i in node.inputs]), end=' ') + print(str([getattr(i, 'dtype', None) + for i in node.outputs])) class GpuEye(GpuOp): @@ -3445,7 +3445,7 @@ return [out_shape] def grad(self, inp, grads): - return [grad_undefined(self, i, inp[i]) for i in xrange(3)] + return [grad_undefined(self, i, inp[i]) for i in range(3)] def __eq__(self, other): return type(self) == type(other) and self.dtype == other.dtype --- ./theano/sandbox/cuda/__init__.py (original) +++ ./theano/sandbox/cuda/__init__.py (refactored) @@ -12,7 +12,7 @@ from theano.gof.cmodule import get_lib_extension from theano.gof.compilelock import get_lock, release_lock from theano.configparser import config, AddConfigVar, StrParam, BoolParam -import nvcc_compiler +from . import nvcc_compiler _logger_name = 'theano.sandbox.cuda' _logger = logging.getLogger(_logger_name) @@ -159,7 +159,7 @@ include_dirs=[cuda_path], libs=[config.cublas.lib], preargs=['-O3'] + compiler.compile_args()) from cuda_ndarray.cuda_ndarray import * - except Exception, e: + except Exception as e: _logger.error("Failed to compile cuda_ndarray.cu: %s", str(e)) set_cuda_disabled() finally: @@ -192,7 +192,7 @@ else: try: os.symlink(cuda_ndarray_so, libcuda_ndarray_so) - except OSError, e: + except OSError as e: # This may happen for instance when running multiple # concurrent jobs, if two of them try to create the # symlink simultaneously. @@ -209,7 +209,7 @@ cuda_initialization_error_message = "" # actively closing our gpu session presents segfault-on-exit on some systems atexit.register(gpu_shutdown) - except EnvironmentError, e: + except EnvironmentError as e: cuda_available = False cuda_initialization_error_message = " ".join(e.args) @@ -263,8 +263,8 @@ shared_constructor = float32_shared_constructor - import basic_ops - from basic_ops import ( + from . import basic_ops + from .basic_ops import ( GpuFromHost, HostFromGpu, GpuElemwise, GpuDimShuffle, GpuCAReduce, GpuReshape, GpuContiguous, GpuSubtensor, GpuIncSubtensor, @@ -274,11 +274,11 @@ ftensor3, ftensor4, scalar, vector, matrix, row, col, tensor3, tensor4) - from basic_ops import (host_from_gpu, gpu_from_host, + from .basic_ops import (host_from_gpu, gpu_from_host, as_cuda_array, as_cuda_ndarray_variable) - import opt + from . import opt import cuda_ndarray - from rng_curand import CURAND_RandomStreams + from .rng_curand import CURAND_RandomStreams def use(device, @@ -378,8 +378,8 @@ " this property") if config.print_active_device: - print >> sys.stderr, "Using gpu device %d: %s" % ( - active_device_number(), active_device_name()) + print("Using gpu device %d: %s" % ( + active_device_number(), active_device_name()), file=sys.stderr) if device_properties(use.device_number)['regsPerBlock'] < 16384: RefactoringTool: Refactored ./theano/sandbox/cuda/GpuConvTransp3D.py RefactoringTool: Refactored ./theano/sandbox/cuda/GpuConvGrad3D.py # We will try to use too much register per bloc at many places # when there is only 8k register per multi-processor. 
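In the profiling printout above, the Python 2 print statements that ended with a trailing comma (which suppressed the newline so several prints build one line) become print(..., end=' ') calls. A short sketch with hypothetical profiler entries:

    node_label = 'GpuElemwise{add}'                    # hypothetical values
    in_dtypes = ['float32', 'float32']
    out_dtypes = ['float64']

    # Python 2:  print '  ', node_label, 3,      <- trailing comma, no newline
    # Python 3:  same effect via the end keyword
    print('  ', node_label, 3, end=' ')
    print(str(in_dtypes), end=' ')
    print(str(out_dtypes))                             # last call ends the line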
@@ -390,7 +390,7 @@ " crash when we try to use features" " that your GPU does not support.") - except (EnvironmentError, ValueError, RuntimeError), e: + except (EnvironmentError, ValueError, RuntimeError) as e: _logger.error(("ERROR: Not using GPU." " Initialisation of device %s failed:\n%s"), str(device), e) @@ -424,7 +424,7 @@ #in case the device if just gpu, # we check that the driver init it correctly. cuda_ndarray.cuda_ndarray.CudaNdarray.zeros((5, 5)) - except (Exception, NameError), e: + except (Exception, NameError) as e: # NameError when no gpu present as cuda_ndarray is not loaded. e.args += ("ERROR: GPU forced but failed. ",) raise --- ./theano/sandbox/cuda/GpuConvTransp3D.py (original) +++ ./theano/sandbox/cuda/GpuConvTransp3D.py (refactored) @@ -43,7 +43,7 @@ def perform_(self, node, inputs, output_storage): W, b, d, H, RShape = inputs - print "\t\t\t\tGpuConvTransp3D python code still uses old format" + print("\t\t\t\tGpuConvTransp3D python code still uses old format") output_storage[0][0] = computeR(W,b,d,H,RShape) def c_code_cache_version(self): @@ -381,7 +381,7 @@ if Rshape is not None and Rshape[0] != -1: if Rshape[0] < videoHeight: - print (Rshape[0], videoHeight) + print((Rshape[0], videoHeight)) assert False assert Rshape[1] >= videoWidth assert Rshape[2] >= videoDur @@ -398,14 +398,14 @@ videoWidth, videoDur ) , dtype=H.dtype) #R[i,j,r,c,t] = b_j + sum_{rc,rk | d \circ rc + rk = r} sum_{cc,ck | ...} sum_{tc,tk | ...} sum_k W[k, j, rk, ck, tk] * H[i,k,rc,cc,tc] - for i in xrange(0,batchSize): + for i in range(0,batchSize): #print '\texample '+str(i+1)+'/'+str(batchSize) - for j in xrange(0,inputChannels): + for j in range(0,inputChannels): #print '\t\tfeature map '+str(j+1)+'/'+str(inputChannels) - for r in xrange(0,videoHeight): + for r in range(0,videoHeight): #print '\t\t\trow '+str(r+1)+'/'+str(videoHeight) - for c in xrange(0,videoWidth): - for t in xrange(0,videoDur): + for c in range(0,videoWidth): + for t in range(0,videoDur): R[i,j,r,c,t] = b[j] ftc = max([0, int(numpy.ceil(float(t-filterDur +1 )/float(dt))) ]) --- ./theano/sandbox/cuda/GpuConvGrad3D.py (original) +++ ./theano/sandbox/cuda/GpuConvGrad3D.py (refactored) @@ -33,7 +33,7 @@ def perform_(self, node, inputs, output_storage): V, d, WShape, dCdH = inputs - print "GpuConvGrad3D python code (warning not updated to new format)" + print("GpuConvGrad3D python code (warning not updated to new format)") #partial C / partial W[j,z,k,l,m] = sum_i sum_p sum_q sum_r (partial C /partial H[i,j,p,q,r] ) * V[i,z,dr*p+k,dc*q+l,dt*r+m] @@ -52,17 +52,17 @@ dCdW = numpy.zeros(WShape, dtype=V.dtype) #block - for j in xrange(0,WShape[0]): - for z in xrange(0,WShape[1]): - for k in xrange(0,WShape[2]): - for l in xrange(0,WShape[3]): + for j in range(0,WShape[0]): + for z in range(0,WShape[1]): + for k in range(0,WShape[2]): + for l in range(0,WShape[3]): #threads - for m in xrange(0,WShape[4]): + for m in range(0,WShape[4]): #thread - for i in xrange(0,batchSize): - RefactoringTool: No changes to ./theano/sandbox/cuda/GpuConv3D.py RefactoringTool: Refactored ./theano/sandbox/conv.py RefactoringTool: No changes to ./theano/raise_op.py RefactoringTool: Refactored ./theano/printing.py for p in xrange(0,outputHeight): - for q in xrange(0,outputWidth): - for r in xrange(0,outputDur): + for i in range(0,batchSize): + for p in range(0,outputHeight): + for q in range(0,outputWidth): + for r in range(0,outputDur): dCdW[j,z,k,l,m] += dCdH[i,j,p,q,r] * V[i,z,dr*p+k,dc*q+l,dt*r+m] output_storage[0][0] = dCdW --- 
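The cuda/__init__.py hunk above collects two of the remaining syntax-level fixers: except Exception, e becomes except Exception as e, and the bare intra-package import nvcc_compiler becomes from . import nvcc_compiler, since Python 3 dropped implicit relative imports. A runnable sketch of the exception form (the mkdir target is just a way to trigger an OSError deterministically):

    import errno
    import os

    # Python 2 spelling:            except OSError, e:
    # Python 3 (what 2to3 emits):   except OSError as e:
    try:
        os.mkdir(os.curdir)                    # '.' always exists, so this raises
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
        print('already exists, continuing')

    # Inside a package, the old implicit relative import
    #     import nvcc_compiler
    # must now be written explicitly:
    #     from . import nvcc_compiler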
./theano/sandbox/conv.py (original) +++ ./theano/sandbox/conv.py (refactored) @@ -1,3 +1,3 @@ import sys -print >> sys.stderr, "DEPRECATION: theano.sandbox.conv no longer provides conv. They have been moved to theano.tensor.nnet.conv" +print("DEPRECATION: theano.sandbox.conv no longer provides conv. They have been moved to theano.tensor.nnet.conv", file=sys.stderr) from theano.tensor.nnet.conv import * --- ./theano/printing.py (original) +++ ./theano/printing.py (refactored) @@ -7,6 +7,7 @@ import logging import os import sys +import collections # Not available on all platforms hashlib = None @@ -111,11 +112,11 @@ def _print_fn(op, xin): for attr in op.attrs: temp = getattr(xin, attr) - if callable(temp): + if isinstance(temp, collections.Callable): pmsg = temp() else: pmsg = temp - print op.message, attr, '=', pmsg + print(op.message, attr, '=', pmsg) class Print(Op): @@ -249,7 +250,7 @@ def __init__(self, *patterns): self.patterns = [] for pattern in patterns: - if isinstance(pattern, basestring): + if isinstance(pattern, str): self.patterns.append((pattern, ())) else: self.patterns.append((pattern[0], pattern[1:])) @@ -388,16 +389,16 @@ current = None if display_inputs: strings = [(0, "inputs: " + ", ".join( - map(str, list(inputs) + updates.keys())))] + map(str, list(inputs) + list(updates.keys()))))] else: strings = [] pprinter = self.clone_assign(lambda pstate, r: r.name is not None and r is not current, LeafPrinter()) - inv_updates = dict((b, a) for (a, b) in updates.iteritems()) + inv_updates = dict((b, a) for (a, b) in updates.items()) i = 1 - for node in gof.graph.io_toposort(list(inputs) + updates.keys(), - list(outputs) + updates.values()): + for node in gof.graph.io_toposort(list(inputs) + list(updates.keys()), + list(outputs) + list(updates.values())): for output in node.outputs: if output in inv_updates: name = str(inv_updates[output]) @@ -450,14 +451,14 @@ epsilon="\\epsilon") else: - special = dict(middle_dot=u"\u00B7", - big_sigma=u"\u03A3") - - greek = dict(alpha=u"\u03B1", - beta=u"\u03B2", - gamma=u"\u03B3", - delta=u"\u03B4", - epsilon=u"\u03B5") + special = dict(middle_dot="\u00B7", + big_sigma="\u03A3") + + greek = dict(alpha="\u03B1", + beta="\u03B2", + gamma="\u03B3", + delta="\u03B4", + epsilon="\u03B5") pprint = PPrinter() @@ -714,7 +715,7 @@ astr = apply_name(node) use_color = None - for opName, color in colorCodes.items(): + for opName, color in list(colorCodes.items()): if opName in node.op.__class__.__name__: use_color = color @@ -793,7 +794,7 @@ g.write(outfile, prog='dot', format=format) if print_output_file: - print 'The output file is available at', outfile + print('ThRefactoringTool: No changes to ./theano/misc/windows.py RefactoringTool: No changes to ./theano/misc/tests/test_pycuda_utils.py RefactoringTool: Refactored ./theano/misc/tests/test_pycuda_theano_simple.py e output file is available at', outfile) if assert_nb_all_strings != -1: assert len(all_strings) == assert_nb_all_strings @@ -882,13 +883,13 @@ return if app in my_list: return - astr = apply_name(app) + '_' + str(len(my_list.keys())) + astr = apply_name(app) + '_' + str(len(list(my_list.keys()))) if len(astr) > max_label_size: astr = astr[:max_label_size - 3] + '...' 
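The printing.py hunk above shows the callable fixer: callable(temp) is rewritten as isinstance(temp, collections.Callable) (with import collections added) because the builtin was absent in Python 3.0 and 3.1. It came back in Python 3.2, and collections.Callable itself now lives in collections.abc (the old alias was removed in 3.10), so on modern interpreters the plain builtin is the more durable spelling. A hedged sketch with made-up attribute values:

    from collections.abc import Callable

    def render(attr_value):
        # Accept either a plain value or a zero-argument callable producing one.
        if callable(attr_value):                      # works again on Python 3.2+
            return attr_value()
        assert not isinstance(attr_value, Callable)   # same test, abc spelling
        return attr_value

    print(render(lambda: 'shape=(3, 4)'))
    print(render('dtype=float32'))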
my_list[app] = astr use_color = None - for opName, color in colorCodes.items(): + for opName, color in list(colorCodes.items()): if opName in app.op.__class__.__name__: use_color = color @@ -902,7 +903,7 @@ for i, nd in enumerate(app.inputs): if nd not in my_list: - varastr = var_name(nd) + '_' + str(len(my_list.keys())) + varastr = var_name(nd) + '_' + str(len(list(my_list.keys()))) if len(varastr) > max_label_size: varastr = varastr[:max_label_size - 3] + '...' my_list[nd] = varastr @@ -922,7 +923,7 @@ for i, nd in enumerate(app.outputs): if nd not in my_list: - varastr = var_name(nd) + '_' + str(len(my_list.keys())) + varastr = var_name(nd) + '_' + str(len(list(my_list.keys()))) if len(varastr) > max_label_size: varastr = varastr[:max_label_size - 3] + '...' my_list[nd] = varastr @@ -959,7 +960,7 @@ plot_apply(nd.owner, depth) try: g.write_png(outfile, prog='dot') - except pd.InvocationException, e: + except pd.InvocationException as e: # Some version of pydot are bugged/don't work correctly with # empty label. Provide a better user error message. if pd.__version__ == "1.0.28" and "label=]" in e.message: @@ -973,7 +974,7 @@ " fix this problem. The pydot error is: " + e.message) - print 'The output file is available at', outfile + print('The output file is available at', outfile) class _TagGenerator: @@ -1143,7 +1144,7 @@ # The __str__ method is encoding the object's id in its str name = position_independent_str(obj) if ' at 0x' in name: - print name + print(name) assert False prefix = cur_tag + '=' --- ./theano/misc/tests/test_pycuda_theano_simple.py (original) +++ ./theano/misc/tests/test_pycuda_theano_simple.py (refactored) @@ -78,12 +78,12 @@ def test_pycuda_memory_to_theano(): #Test that we can use the GpuArray memory space in pycuda in a CudaNdarray y = pycuda.gpuarray.zeros((3, 4, 5), 'float32') - print sys.getrefcount(y) + print(sys.getrefcount(y)) # This increase the ref count with never pycuda. Do pycuda also # cache ndarray? 
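A pattern worth noticing in the pydot labelling code above: the keys() fixer produces str(len(list(my_list.keys()))), which is correct but copies every key just to count them; len(my_list) gives the same number directly. A one-liner sketch with a hypothetical label map:

    my_list = {'Apply0': 'node0', 'Apply1': 'node1'}    # hypothetical apply -> label map

    # The mechanical rewrite and the simpler equivalent agree:
    assert len(list(my_list.keys())) == len(my_list) == 2
    label = 'GpuFromHost' + '_' + str(len(my_list))      # same suffix, no key copy
    print(label)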
# print y.get() - print "gpuarray ref count before creating a CudaNdarray", - print sys.getrefcount(y) + print("gpuarray ref count before creating a CudaNdarray", end=' ') + print(sys.getrefcount(y)) assert sys.getrefcount(y) == 2 rand = numpy.random.randn(*y.shape).astype(numpy.float32) cuda_rand = cuda_ndarray.CudaNdarray(rand) @@ -92,13 +92,13 @@ for i in y.shape[::-1][:-1]: strides.append(strides[-1] * i) strides = tuple(strides[::-1]) - print 'strides', strides + print('strides', strides) assert cuda_rand._strides == strides, (cuda_rand._strides, strides) # in pycuda trunk, y.ptr also works, which is a little cleaner y_ptr = int(y.gpudata) z = cuda_ndarray.from_gpu_pointer(y_ptr, y.shape, strides, y) - print "gpuarray ref count after creating a CudaNdarray", sys.getrefcount(y) + print("gpuarray ref count after creating a CudaNdarray", sys.getrefcount(y)) assert sys.getrefcount(y) == 3 assert (numpy.asarray(z) == 0).all() assert z.base is y @@ -124,6 +124,6 @@ # Check that the ref count to the gpuarray isRefactoringTool: No changes to ./theano/misc/tests/test_pycuda_example.py RefactoringTool: No changes to ./theano/misc/tests/test_may_share_memory.py RefactoringTool: No changes to ./theano/misc/tests/test_gnumpy_utils.py RefactoringTool: No changes to ./theano/misc/tests/test_cudamat_utils.py RefactoringTool: Refactored ./theano/misc/strutil.py RefactoringTool: No changes to ./theano/misc/safe_asarray.py RefactoringTool: No changes to ./theano/misc/pycuda_utils.py RefactoringTool: No changes to ./theano/misc/pycuda_init.py RefactoringTool: Refactored ./theano/misc/pycuda_example.py right. del z - print "gpuarray ref count after deleting the CudaNdarray", - print sys.getrefcount(y) + print("gpuarray ref count after deleting the CudaNdarray", end=' ') + print(sys.getrefcount(y)) assert sys.getrefcount(y) == 2 --- ./theano/misc/strutil.py (original) +++ ./theano/misc/strutil.py (refactored) @@ -14,7 +14,7 @@ """ try: finalCode = string % sub - except Exception , E: + except Exception as E: # If unable to render the string, render longer and longer # initial substrings until we find the minimal initial substring # that causes an error @@ -22,7 +22,7 @@ while i <= len(string): try: finalCode = string[0:i] % sub - except Exception, F: + except Exception as F: if str(F) == str(E): raise Exception(string[0:i]+"<<<< caused exception "+str(F)) i+=1 @@ -40,7 +40,7 @@ lines = [ strip_leading_white_space(line) for line in lines ] indent = 0 - for i in xrange(len(lines)): + for i in range(len(lines)): indent -= lines[i].count('}') if indent < 0: indent = 0 --- ./theano/misc/pycuda_example.py (original) +++ ./theano/misc/pycuda_example.py (refactored) @@ -30,7 +30,7 @@ from theano.sandbox.cuda.opt import gpu_seqopt from theano.tensor.utils import hash_from_dict -import pycuda_init +from . import pycuda_init if not pycuda_init.pycuda_available: raise Exception("No pycuda available. 
You can't load pycuda_example.py") @@ -190,7 +190,7 @@ def __str__(self): if self.name is None: if self.inplace_pattern: - items = self.inplace_pattern.items() + items = list(self.inplace_pattern.items()) items.sort() return self.__class__.__name__ + "{%s}%s" % (self.scalar_op, str(items)) @@ -224,16 +224,16 @@ assert self.nout == 1 fct_name = "pycuda_elemwise_%s" % str(self.scalar_op) - out_node = Apply(self, _inputs, [otype() for o in xrange(self.nout)]) + out_node = Apply(self, _inputs, [otype() for o in range(self.nout)]) in_name = ["i" + str(id) for id in range(len(inputs))] out_name = ["o" + str(id) for id in range(self.nout)] c_code = self.scalar_op.c_code(out_node, "some_name", tuple([n + "[i]" for n in in_name]), tuple(n + "[i]" for n in out_name), {}) c_code_param = ", ".join([_replace_npy_types(var.type.dtype_specs()[1]) + " *" + name - for var, name in (zip(inputs, in_name) + - zip(out_node.outputs, - out_name))] + + for var, name in (list(zip(inputs, in_name)) + + list(zip(out_node.outputs, + out_name)))] + ["int size"]) mod = SourceModule(""" __global__ void %s(%s) @@ -284,7 +284,7 @@ def __str__(self): if self.name is None: if self.inplace_pattern: - items = self.inplace_pattern.items() + items = list(self.inplace_pattern.items()) items.sort() return self.__class__.__name__ + "{%s}%s" % (self.scalar_op, str(items)) @@ -307,7 +307,7 @@ raise Exception("pycuda don't support broadcasted dimensions") otype = CudaNdarrayType(broadcastable=[False] * _inputs[0].type.ndim) - out_node = Apply(self, _inputs, [otype() for o in xrange(self.nout)]) + out_node = Apply(self, _inputs, [otype() for o in range(self.nout)]) return out_node def make_thunk(self, node, storage_map, RefactoringTool: Refactored ./theano/misc/pkl_utils.py RefactoringTool: Refactored ./theano/misc/ordered_set.py RefactoringTool: Refactored ./theano/misc/nose_pr.py RefactoringTool: No changes to ./theano/misc/may_share_memory.py RefactoringTool: Refactored ./theano/misc/latence_gpu_transfert.py _, _2): @@ -322,8 +322,8 @@ tuple(n + "[i]" for n in out_name), {}) c_code_param = ", ".join([_replace_npy_types(var.type.dtype_specs()[1]) + " *" + name for var, name in - zip(node.inputs, in_name) + - zip(node.outputs, out_name)] + ["int size"]) + list(zip(node.inputs, in_name)) + + list(zip(node.outputs, out_name))] + ["int size"]) mod = SourceModule(""" __global__ void %s(%s) { --- ./theano/misc/pkl_utils.py (original) +++ ./theano/misc/pkl_utils.py (refactored) @@ -78,8 +78,8 @@ pass if self.is_verbose: - print(sys.exc_info()) - print(func, args) + print((sys.exc_info())) + print((func, args)) raise --- ./theano/misc/ordered_set.py (original) +++ ./theano/misc/ordered_set.py (refactored) @@ -16,7 +16,7 @@ # theano to use exceptions correctly, so that this can be a TypeError. if iterable is not None: assert isinstance(iterable, ( - list, tuple, OrderedSet, types.GeneratorType, basestring)) + list, tuple, OrderedSet, types.GeneratorType, str)) if MutableSet is not None: # Copyright (C) 2009 Raymond Hettinger @@ -65,7 +65,7 @@ # Checks added by IG check_deterministic(iterable) self.__root = root = Link() # sentinel node for doubly linked list - root.prev = root.next = root + root.prev = root.__next__ = root self.__map = {} # key --> link if iterable is not None: self |= iterable @@ -90,16 +90,16 @@ # then removed by updating the links in the predecessor and successors. 
if key in self.__map: link = self.__map.pop(key) - link.prev.next = link.next + link.prev.next = link.__next__ link.next.prev = link.prev def __iter__(self): # Traverse the linked list in order. root = self.__root - curr = root.next + curr = root.__next__ while curr is not root: yield curr.key - curr = curr.next + curr = curr.__next__ def __reversed__(self): # Traverse the linked list in reverse order. @@ -214,7 +214,7 @@ if __name__ == '__main__': - print list(OrderedSet('abracadaba')) - print list(OrderedSet('simsalabim')) - print OrderedSet('boom') == OrderedSet('moob') - print OrderedSet('boom') == 'moob' + print(list(OrderedSet('abracadaba'))) + print(list(OrderedSet('simsalabim'))) + print(OrderedSet('boom') == OrderedSet('moob')) + print(OrderedSet('boom') == 'moob') --- ./theano/misc/nose_pr.py (original) +++ ./theano/misc/nose_pr.py (refactored) @@ -9,7 +9,7 @@ Usage: python test_pr.py 1657 """ -from __future__ import print_function + import errno from glob import glob @@ -25,7 +25,7 @@ PIPE, STDOUT, CalledProcessError) import sys -import gh_api +from . import gh_api basedir = os.path.join(os.path.expanduser("~"), ".theano_pr_tests") repodir = os.path.join(basedir, "Theano") @@ -58,7 +58,7 @@ try: os.mkdir(basedir) - except OSError, e: + except OSError as e: if e.errno != errno.EEXIST: raise os.chdir(basedir) @@ -134,7 +134,7 @@ try: return True, check_output([iptest], stderr=STDOUT).decode('utf-8') - except CalledProcessError, e: + except CalledProcessError as e: return False, e.output.decode('utf-8') finally: # Restore $PATH --- ./theano/misc/latence_gpu_transfert.py (original) +++ ./theano/misc/latence_gpu_transfert.py (refactored) @@ -8,12 +8,12 @@ x = theano.shared(numpy.zeros(1,dtype='float32')) f1 = theano.function([y],updates={x:y}) f2 = theano.function([]RefactoringTool: Refactored ./theano/misc/gnumpy_utils.py RefactoringTool: Refactored ./theano/misc/gh_api.py RefactoringTool: No changes to ./theano/misc/doubleop.py RefactoringTool: Refactored ./theano/misc/cudamat_utils.py RefactoringTool: No changes to ./theano/misc/cpucount.py RefactoringTool: Refactored ./theano/misc/check_duplicate_key.py ,theano.sandbox.cuda.host_from_gpu(x)) -print f1.maker.fgraph.toposort() -print f2.maker.fgraph.toposort() +print(f1.maker.fgraph.toposort()) +print(f2.maker.fgraph.toposort()) for i in [1,10,100,1000, 10000, 100000,1000000, 10000000]: o = numpy.zeros(i, dtype='float32') t0=time.time();f1(o);t1=time.time(); tf1=t1-t0 t0=time.time();f2();t1=time.time(); - print "%8i %6.1f ns %7.1f ns"%(i, tf1*1e6,(t1-t0)*1e6) + print("%8i %6.1f ns %7.1f ns"%(i, tf1*1e6,(t1-t0)*1e6)) --- ./theano/misc/gnumpy_utils.py (original) +++ ./theano/misc/gnumpy_utils.py (refactored) @@ -2,6 +2,7 @@ This code can only work if gnumpy and theano are initialized on the same gpu as theano. """ +from functools import reduce try: import gnumpy @@ -113,7 +114,7 @@ strides = tuple(strides) import ctypes - ptr_long = long(ctypes.cast(x._base.mat.data_device, ctypes.c_void_p).value) + ptr_long = int(ctypes.cast(x._base.mat.data_device, ctypes.c_void_p).value) # seems legit. z = cuda.from_gpu_pointer(ptr_long, x.shape, strides, x._base) --- ./theano/misc/gh_api.py (original) +++ ./theano/misc/gh_api.py (refactored) @@ -1,5 +1,5 @@ """Functions for Github authorisation.""" -from __future__ import print_function + try: input = raw_input @@ -29,7 +29,7 @@ print("Please enter your github username and password. These are not " "stored, only used to get an oAuth token. 
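The ordered_set.py hunks above deserve a manual audit: the next fixer renamed some occurrences of the linked-list attribute (root.__next__ in __init__, the reads link.__next__ and curr.__next__) while leaving others untouched (the assignment target link.prev.next and the read link.next.prev), so the converted file refers to the forward pointer under two different names. Renaming the attribute to something that is not next sidesteps the fixer entirely; a minimal sketch of the same sentinel-ring idea under that assumption (Link, next_link and the helpers below are illustrative, not Theano's API):

    class Link:
        # Doubly linked ring node; 'next_link' avoids any clash with the
        # iterator protocol and with the 2to3 next fixer.
        __slots__ = ('prev', 'next_link', 'key')

    def make_ring():
        root = Link()
        root.prev = root.next_link = root          # empty ring points at itself
        return root

    def insert(root, key):
        last = root.prev                           # append just before the sentinel
        node = Link()
        node.key, node.prev, node.next_link = key, last, root
        last.next_link = root.prev = node

    def iterate(root):
        curr = root.next_link
        while curr is not root:
            yield curr.key
            curr = curr.next_link

    ring = make_ring()
    for k in 'abc':
        insert(ring, k)
    assert list(iterate(ring)) == ['a', 'b', 'c']
    print(list(iterate(ring)))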
You can revoke this at " "any time on Github.") - user = input("Username: ") + user = eval(input("Username: ")) pw = getpass.getpass("Password: ") auth_request = { --- ./theano/misc/cudamat_utils.py (original) +++ ./theano/misc/cudamat_utils.py (refactored) @@ -104,7 +104,7 @@ strides = tuple(strides[::-1]) import ctypes - ptr_long = long(ctypes.cast(x.mat.data_device, ctypes.c_void_p).value) + ptr_long = int(ctypes.cast(x.mat.data_device, ctypes.c_void_p).value) # seems legit. --- ./theano/misc/check_duplicate_key.py (original) +++ ./theano/misc/check_duplicate_key.py (refactored) @@ -1,4 +1,4 @@ -import cPickle +import pickle import os, sys import theano @@ -41,23 +41,23 @@ del f del path except IOError: - print dir, "don't have a mod.{cpp,cu} file" + print(dir, "don't have a mod.{cpp,cu} file") pass if DISPLAY_DUPLICATE_KEYS: - for k, v in keys.iteritems(): + for k, v in keys.items(): if v > 1: - print "Duplicate key (%i copies): %s" % (v, cPickle.loads(k)) + print("Duplicate key (%i copies): %s" % (v, pickle.loads(k))) nbs_keys = {} # nb seen -> now many key -for val in keys.values(): +for val in list(keys.values()): nbs_keys.setdefault(val, 0) nbs_keys[val]+=1 nbs_mod = {} # nb seen -> how many key nbs_mod_to_key = {} #nb seen -> keys more_than_one = 0 -for mod,kk in mods.iteritems(): +for mod,kk in mods.items(): val = len(kk) nbs_mod.setdefault(val, 0) nbs_mod[val]+=1 @@ -67,27 +67,27 @@ if DISPLAY_MOST_FREQUENT_DUPLICATE_CCODE: m = max(nbs_mod.keys()) - print "The keys associated to the mod.{cpp,cu} with the most number of copy:" + print("The keys associated to the mod.{cpp,cu} with the most number of copy:") for kk in nbs_mod_to_key[m]: - kk = cPickle.loads(kk) - print kk + kk = pickle.loads(kk) + print(kk) -print "key.pkl histograph" -l = nbs_keys.items() +print("key.pkl histograph") +l = list(nbs_keys.items()) l.sort() -print l +print(l) -print "mod.{cpp,cu} histogram" -l = nbs_mod.items() +print("mod.{cpp,cu} histogram") +l = list(nbs_mod.items()) l.sort() -print l +print(l) -total = sum([len(k) for k in mods.values()]) +total = sum([len(k) for k in list(mods.values())]) uniq = len(mods) useless = total - uniq -print "mod.{cpp,cu} total:", total -print "mod.{cpp,cu} uniq:", uniq -print "mod.{cpp,cu} with more than 1 copy:", more_than_one -print "mod.{cpp,cu} useless:", useRefactoringTool: Refactored ./theano/misc/check_blas.py less, float(useless)/total*100,"%" +print("mod.{cpp,cu} total:", total) +print("mod.{cpp,cu} uniq:", uniq) +print("mod.{cpp,cu} with more than 1 copy:", more_than_one) +print("mod.{cpp,cu} useless:", useless, float(useless)/total*100,"%") -print "nb directory", len(dirs) +print("nb directory", len(dirs)) --- ./theano/misc/check_blas.py (original) +++ ./theano/misc/check_blas.py (refactored) @@ -39,32 +39,32 @@ """ if verbose: - print 'Some Theano flags:' - print ' blas.ldflags=', theano.config.blas.ldflags - print ' compiledir=', theano.config.compiledir - print ' floatX=', theano.config.floatX - print ' device=', theano.config.device - print 'Some OS information:' - print ' sys.platform=', sys.platform - print ' sys.version=', sys.version - print ' sys.prefix=', sys.prefix - print 'Some environment variables:' - print ' MKL_NUM_THREADS=', os.getenv('MKL_NUM_THREADS') - print ' OMP_NUM_THREADS=', os.getenv('OMP_NUM_THREADS') - print ' GOTO_NUM_THREADS=', os.getenv('GOTO_NUM_THREADS') - print + print('Some Theano flags:') + print(' blas.ldflags=', theano.config.blas.ldflags) + print(' compiledir=', theano.config.compiledir) + print(' floatX=', 
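The eval(input(...)) line above comes from the stock 2to3 input fixer, which assumes Python 2 semantics (where input() evaluated what was typed). Because gh_api.py already aliases input = raw_input for Python 2, the wrapper is unnecessary on Python 3 and would try to evaluate whatever username is entered. A hedged sketch of the usual shape of this prompt without the eval:

    import getpass

    def ask_credentials():
        # Python 3's input() already returns the raw string (the old raw_input),
        # so no eval() is wanted here.
        user = input('Username: ')
        pw = getpass.getpass('Password: ')
        return user, pw

    # Interactive usage:
    #     user, pw = ask_credentials()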
theano.config.floatX) + print(' device=', theano.config.device) + print('Some OS information:') + print(' sys.platform=', sys.platform) + print(' sys.version=', sys.version) + print(' sys.prefix=', sys.prefix) + print('Some environment variables:') + print(' MKL_NUM_THREADS=', os.getenv('MKL_NUM_THREADS')) + print(' OMP_NUM_THREADS=', os.getenv('OMP_NUM_THREADS')) + print(' GOTO_NUM_THREADS=', os.getenv('GOTO_NUM_THREADS')) + print() print ('Numpy config: (used when the Theano flag' ' "blas.ldflags" is empty)') numpy.show_config() - print 'Numpy dot module:', numpy.dot.__module__ - print 'Numpy location:', numpy.__file__ - print 'Numpy version:', numpy.__version__ + print('Numpy dot module:', numpy.dot.__module__) + print('Numpy location:', numpy.__file__) + print('Numpy version:', numpy.__version__) if (theano.config.device.startswith("gpu") or theano.config.init_gpu_device.startswith("gpu")): - print 'nvcc version:' + print('nvcc version:') subprocess.call((theano.sandbox.cuda.nvcc_compiler.nvcc_path, "--version")) - print + print() a = theano.shared(numpy.ones((M, N), dtype=theano.config.floatX, order=order)) @@ -150,11 +150,11 @@ options, arguments = parser.parse_args(sys.argv) if hasattr(options, "help"): - print options.help + print(options.help) sys.exit(0) if not options.quiet: - print """ + print(""" Some results that you can compare against. They were 10 executions of gemm in float64 with matrices of shape 2000x2000 (M=N=K=2000). All memory layout was in C order. @@ -227,7 +227,7 @@ GT 220 3.80s GT 210 6.35s 8500 GT 10.68s - """ + """) t, impl = execute(not options.print_only, not options.quiet, M=options.M, N=options.N, K=options.K, @@ -236,17 +236,17 @@ if options.print_only: pass elif options.quiet: - print t + print(t) else: - print - print "We executed", options.iter, - print "calls to gemm with a and b matrices of shapes", - print "(%d, %d) and (%d, %d)." % (options.M, options.N, - options.N, options.K) - - print - print 'Total execution time: %.2fs on %s.' % (t, impl) - RefactoringTool: Refactored ./theano/misc/buildbot_filter.py RefactoringTool: Refactored ./theano/ifelse.py print + print() + print("We executed", options.iter, end=' ') + print("calls to gemm with a and b matrices of shapes", end=' ') + print("(%d, %d) and (%d, %d)." % (options.M, options.N, + options.N, options.K)) + + print() + print('Total execution time: %.2fs on %s.' % (t, impl)) + print() print ('Try to run this script a few times. Experience shows that' ' the first time is not as fast as followings calls. 
The' ' difference is not big, but consistent.') --- ./theano/misc/buildbot_filter.py (original) +++ ./theano/misc/buildbot_filter.py (refactored) @@ -22,6 +22,6 @@ import pdb pdb.set_trace() if len(sys.argv) > 1: - print filter_output(open(sys.argv[1])) + print(filter_output(open(sys.argv[1]))) else: - print filter_output(sys.stdin) + print(filter_output(sys.stdin)) --- ./theano/ifelse.py (original) +++ ./theano/ifelse.py (refactored) @@ -20,7 +20,7 @@ __contact__ = "Razvan Pascanu " from copy import deepcopy -from itertools import izip + import logging from theano.gof import PureOp, Apply @@ -69,7 +69,7 @@ # check destroyhandler and others to ensure that a view_map with # multiple inputs can work view_map = {} - for idx in xrange(n_outs): + for idx in range(n_outs): view_map[idx] = [idx + 1] self.view_map = view_map self.as_view = as_view @@ -177,7 +177,7 @@ ts = args[:self.n_outs] fs = args[self.n_outs:] - for t, f in izip(ts, fs): + for t, f in zip(ts, fs): if t.type != f.type: raise TypeError(('IfElse requires same types for true and ' 'false return values'), t, f, t.type, f.type) @@ -237,12 +237,12 @@ else: truthval = storage_map[cond][0] if truthval != 0: - ls = [idx + 1 for idx in xrange(self.n_outs) + ls = [idx + 1 for idx in range(self.n_outs) if not compute_map[ts[idx]][0]] if len(ls) > 0: return ls else: - for out, outtype, t in izip(outputs, outtypes, ts): + for out, outtype, t in zip(outputs, outtypes, ts): compute_map[out][0] = 1 if self.as_view: oval = outtype.filter(storage_map[t][0]) @@ -252,12 +252,12 @@ storage_map[out][0] = oval return [] else: - ls = [1 + idx + self.n_outs for idx in xrange(self.n_outs) + ls = [1 + idx + self.n_outs for idx in range(self.n_outs) if not compute_map[fs[idx]][0]] if len(ls) > 0: return ls else: - for out, outtype, f in izip(outputs, outtypes, fs): + for out, outtype, f in zip(outputs, outtypes, fs): compute_map[out][0] = 1 # can't view both outputs unless destroyhandler # improves @@ -325,7 +325,7 @@ # we will store them in these new_... lists. 
new_then_branch = [] new_else_branch = [] - for then_branch_elem, else_branch_elem in izip(then_branch, else_branch): + for then_branch_elem, else_branch_elem in zip(then_branch, else_branch): if not isinstance(then_branch_elem, theano.Variable): then_branch_elem = theano.tensor.as_tensor_variable( then_branch_elem) @@ -508,11 +508,11 @@ ins_t = tval.owner.inputs[1:][:ins_op.n_outs] replace[idx + 1] = ins_t[tval.owner.outputs.index(tval)] - if len(replace.itemRefactoringTool: Refactored ./theano/gradient.py s()) == 0: + if len(list(replace.items())) == 0: return False old_ins = list(node.inputs) - for pos, var in replace.items(): + for pos, var in list(replace.items()): old_ins[pos] = var return op(*old_ins, **dict(return_list=True)) @@ -533,11 +533,11 @@ replace[idx + 1 + op.n_outs] = \ ins_t[fval.owner.outputs.index(fval)] - if len(replace.items()) == 0: + if len(list(replace.items())) == 0: return False old_ins = list(node.inputs) - for pos, var in replace.items(): + for pos, var in list(replace.items()): old_ins[pos] = var return op(*old_ins, **dict(return_list=True)) @@ -549,7 +549,7 @@ def apply(self, fgraph): nodelist = list(fgraph.toposort()) - cond_nodes = filter(lambda s: isinstance(s.op, IfElse), nodelist) + cond_nodes = [s for s in nodelist if isinstance(s.op, IfElse)] if len(cond_nodes) < 2: return False merging_node = cond_nodes[0] @@ -576,7 +576,7 @@ as_view=False, gpu=False, name=mn_name + '&' + pl_name) - print 'here' + print('here') new_outs = new_ifelse(*new_ins, **dict(return_list=True)) new_outs = [clone(x) for x in new_outs] old_outs = [] @@ -588,7 +588,7 @@ old_outs += [proposal.outputs] else: old_outs += proposal.outputs - pairs = zip(old_outs, new_outs) + pairs = list(zip(old_outs, new_outs)) fgraph.replace_all_validate(pairs, reason='cond_merge') @@ -603,22 +603,22 @@ # sync outs out_map = {} - for idx in xrange(len(node.outputs)): + for idx in range(len(node.outputs)): if idx not in out_map: - for jdx in xrange(idx + 1, len(node.outputs)): + for jdx in range(idx + 1, len(node.outputs)): if (ts[idx] == ts[jdx] and fs[idx] == fs[jdx] and jdx not in out_map): out_map[jdx] = idx - if len(out_map.keys()) == 0: + if len(list(out_map.keys())) == 0: return False nw_ts = [] nw_fs = [] inv_map = {} pos = 0 - for idx in xrange(len(node.outputs)): + for idx in range(len(node.outputs)): if idx not in out_map: inv_map[idx] = pos pos = pos + 1 @@ -634,8 +634,8 @@ new_outs = new_ifelse(*new_ins, **dict(return_list=True)) rval = [] - for idx in xrange(len(node.outputs)): - if idx in out_map.keys(): + for idx in range(len(node.outputs)): + if idx in list(out_map.keys()): rval += [new_outs[inv_map[out_map[idx]]]] else: rval += [new_outs[inv_map[idx]]] @@ -692,7 +692,7 @@ old_outs += [proposal.outputs] else: old_outs += proposal.outputs - pairs = zip(old_outs, new_outs) + pairs = list(zip(old_outs, new_outs)) main_outs = clone(main_node.outputs, replace=pairs) return main_outs --- ./theano/gradient.py (original) +++ ./theano/gradient.py (refactored) @@ -7,8 +7,9 @@ __docformat__ = "restructuredtext en" -import __builtin__ -from itertools import izip +import builtins +from functools import reduce + import logging import warnings _logger = logging.getLogger('theano.gradient') @@ -338,7 +339,7 @@ wrt = [wrt] assert len(f) == len(grads) - known = dict(izip(f, grads)) + known = dict(zip(f, grads)) ret = grad(cost=None, known_grads=known, consider_constant=consider_constant, wrt=wrt, @@ -431,7 +432,7 @@ if cost is not None: outputs.append(cost) if known_grads is not None: - 
outputs.extend(known_grads.keys()) + outputs.extend(list(known_grads.keys())) var_to_app_to_idx = _populate_var_to_app_to_idx( outputs, wrt, consider_constantRefactoringTool: Refactored ./theano/gof/vm.py ) @@ -527,7 +528,7 @@ rval = _populate_grad_dict(var_to_app_to_idx, grad_dict, wrt, cost_name) - for i in xrange(len(rval)): + for i in range(len(rval)): if isinstance(rval[i].type, DisconnectedType): handle_disconnected(rval[i]) if return_disconnected == 'zero': @@ -575,7 +576,7 @@ for ipt in node.inputs] assert isinstance(connection_pattern, list) assert len(connection_pattern) == len(node.inputs) - for ii in xrange(len(node.inputs)): + for ii in range(len(node.inputs)): assert isinstance(connection_pattern[ii], list) assert len(connection_pattern[ii]) == \ len(node.outputs) @@ -843,7 +844,7 @@ # each destroyed input. try: dinputs = [node.inputs[x[0]] for x in - node.op.destroy_map.values()] + list(node.op.destroy_map.values())] except AttributeError: dinputs = [] @@ -904,7 +905,7 @@ # because some gradients are only ever specified by the user, not computed # by Op.grad, and some gradients are only computed and returned, but never # passed as another node's output grads. - for idx, packed in enumerate(izip(node.outputs, + for idx, packed in enumerate(zip(node.outputs, new_output_grads)): orig_output, new_output_grad = packed if not hasattr(orig_output, 'shape'): @@ -1202,9 +1203,9 @@ # if not dtypes == [dtypes[0]] * len(apt): # raise TypeError('All function arguments must have same dtype') - total_size = __builtin__.sum(prod(sh) for sh in shapes) - - working_dtype = __builtin__.min((self.type_eps[dt], dt) + total_size = builtins.sum(prod(sh) for sh in shapes) + + working_dtype = builtins.min((self.type_eps[dt], dt) for dt in dtypes)[1] # create un-initialized memory @@ -1215,7 +1216,7 @@ gx = numpy.ndarray((total_size,), dtype=working_dtype) if eps is None: - eps = __builtin__.max(self.type_eps[dt] for dt in dtypes) + eps = builtins.max(self.type_eps[dt] for dt in dtypes) # set up aliases so that apt[i] is backed by memory in x # and self.gf is backed by memory in gx @@ -1234,7 +1235,7 @@ # now iterate over the elements of x, and call f on apt. x_copy = x.copy() - for i in xrange(total_size): + for i in range(total_size): x[:] = x_copy x[i] += eps @@ -1395,9 +1396,9 @@ float64=1e-4) if abs_tol is None: - abs_tol = __builtin__.max(_type_tol[str(p.dtype)] for p in pt) + abs_tol = builtins.max(_type_tol[str(p.dtype)] for p in pt) if rel_tol is None: - rel_tol = __builtin__.max(_type_tol[str(p.dtype)] for p in pt) + rel_tol = builtins.max(_type_tol[str(p.dtype)] for p in pt) if rng is None: raise TypeError(('rng should be a valid instance of ' @@ -1464,7 +1465,7 @@ grad_fn = function(tensor_pt, symbolic_grad) - for test_num in xrange(n_tests): + for test_num in range(n_tests): try: num_grad = numeric_grad(cost_fn, [p.copy() for p in pt], eps, out_type) @@ -1486,7 +1487,7 @@ # get new random projection for next test if test_num < n_tests - 1: t_r.set_value(random_projection(), borrow=True) - except Exception, e: + except Exception as e: e.args += ("\nThe error happened with the following inputs:", pt, "\nThe value of eps is:", eps, "\nThe out_type is:", out_type) --- ./theano/gof/vm.py (original) +++ ./theano/gof/vm.py (refactored) @@ -3,7 +3,7 @@ A VM is not actually different from a Linker, we just decided VM waRefactoringTool: Refactored ./theano/gof/utils.py s a better name at some point. """ -import link +from . 
import link import logging import os import sys @@ -142,7 +142,7 @@ profile.variable_strides = self.variable_strides.copy() # clear the timer info out of the buffers - for i in xrange(len(self.call_times)): + for i in range(len(self.call_times)): self.call_times[i] = 0.0 self.call_counts[i] = 0 @@ -326,7 +326,7 @@ last_apply_stack_len = -1 #This record all function inputs/shared varibles and constants - for var, data in self.storage_map.iteritems(): + for var, data in self.storage_map.items(): if data[0] is None: continue if hasattr(var.type, 'get_shape_info'): @@ -513,7 +513,7 @@ try: - import lazylinker_c + from . import lazylinker_c class CVM(lazylinker_c.CLazyLinker, VM): def __init__(self, *args, **kwargs): @@ -521,7 +521,7 @@ # skip VM.__init__ except ImportError: pass -except (OSError, theano.gof.cmodule.MissingGXX), e: +except (OSError, theano.gof.cmodule.MissingGXX) as e: # OSError happens when g++ is not installed. In that case, we # already changed the default linker to something else then CVM. # Currently this is the py linker. @@ -695,17 +695,17 @@ nodes_idx_inv = {} vars_idx_inv = {} - for (node, i) in nodes_idx.items(): + for (node, i) in list(nodes_idx.items()): nodes_idx_inv[i] = node - for (var, i) in vars_idx.items(): + for (var, i) in list(vars_idx.items()): vars_idx_inv[i] = var # put storage_map and compute_map into a int-based scheme n_applies = len(nodes) storage_map_list = [storage_map[vars_idx_inv[i]] - for i in xrange(len(vars_idx_inv))] + for i in range(len(vars_idx_inv))] compute_map_list = [compute_map[vars_idx_inv[i]] - for i in xrange(len(vars_idx_inv))] + for i in range(len(vars_idx_inv))] if nodes: assert type(storage_map_list[0]) is list assert type(compute_map_list[0]) is list @@ -714,7 +714,7 @@ dependency_map = self.compute_gc_dependencies(storage_map) dependency_map_list = [ [vars_idx[d] for d in dependency_map[vars_idx_inv[i]]] - for i in xrange(len(vars_idx_inv))] + for i in range(len(vars_idx_inv))] else: dependency_map_list = None @@ -736,7 +736,7 @@ # build the var owner array var_owner = [None] * len(vars_idx) - for (var, i) in vars_idx.items(): + for (var, i) in list(vars_idx.items()): if var.owner: var_owner[i] = nodes_idx[var.owner] @@ -764,7 +764,7 @@ # values of the update expressions). update_storage = [] update_in_from_out = {} - for (ivar, ovar) in updated_vars.items(): + for (ivar, ovar) in list(updated_vars.items()): update_in_from_out[vars_idx[ovar]] = vars_idx[ivar] for oidx in output_vars: if oidx in update_in_from_out: @@ -845,7 +845,7 @@ storage_map, compute_map, no_recycling)) - except Exception, e: + except Exception as e: e.args = ("The following error happened while" " compiling the node", node, "\n") + e.args raise --- ./theano/gof/utils.py (original) +++ ./theano/gof/utils.py (refactored) @@ -18,7 +18,7 @@ def hashgen(): hashgen.next += 1 - return hashgen.next + return hashgen.__next__ hashgen.next = 0 @@ -64,9 +64,9 @@ return "scratchpad" + stRefactoringTool: Refactored ./theano/gof/unify.py r(self.__dict__) def info(self): - print "" % id(self) - for k, v in self.__dict__.items(): - print " %s: %s" % (k, v) + print("" % id(self)) + for k, v in list(self.__dict__.items()): + print(" %s: %s" % (k, v)) class D: @@ -111,8 +111,8 @@ def g(*args, **kwargs): if printme[0]: - print 'WARNING: %s.%s deprecated. %s'\ - % (filename, f.__name__, msg) + print('WARNING: %s.%s deprecated. 
%s'\ + % (filename, f.__name__, msg)) printme[0] = False return f(*args, **kwargs) return g @@ -141,7 +141,7 @@ raise Exception('not worth it') set2 = set(seq2) return [x for x in seq1 if x not in set2] - except Exception, e: + except Exception as e: # maybe a seq2 element is not hashable # maybe seq2 is too short # -> use O(len(seq1) * len(seq2)) algo @@ -240,7 +240,7 @@ seq = [] done = set() postreqs_d = {} - for x, prereqs in prereqs_d.items(): + for x, prereqs in list(prereqs_d.items()): for prereq in prereqs: postreqs_d.setdefault(prereq, set()).add(x) next = set([k for k in prereqs_d if not prereqs_d[k]]) @@ -269,11 +269,11 @@ for op in self.order: for input in op.inputs: if input.owner: - print ' '.join(( + print(' '.join(( input.owner.__class__.__name__ + str(abs(id(input.owner))), " -> ", op.__class__.__name__ + str(abs(id(op))), - ";")) + ";"))) class Keyword: @@ -282,7 +282,7 @@ self.name = name self.nonzero = nonzero - def __nonzero__(self): + def __bool__(self): return self.nonzero def __str__(self): @@ -305,7 +305,7 @@ def comm_guard(type1, type2): def wrap(f): - old_f = f.func_globals[f.__name__] + old_f = f.__globals__[f.__name__] def new_f(arg1, arg2, *rest): if (type1 is ANY_TYPE or isinstance(arg1, type1)) \ @@ -343,7 +343,7 @@ def type_guard(type1): def wrap(f): - old_f = f.func_globals[f.__name__] + old_f = f.__globals__[f.__name__] def new_f(arg1, *rest): if (type1 is ANY_TYPE or isinstance(arg1, type1)): @@ -398,14 +398,14 @@ """ Gives unique names to an iterable of variables. Modifies input. This function is idempotent.""" - names = map(lambda var: var.name, variables) + names = [var.name for var in variables] h = hist(names) bad_var = lambda var: not var.name or h[var.name] > 1 for i, var in enumerate(filter(bad_var, variables)): var.name = (var.name or "") + "_%d" % i - if not unique(map(str, variables)): + if not unique(list(map(str, variables))): raise ValueError("Not all variables have unique names." "Maybe you've named some of the variables identically") @@ -421,4 +421,4 @@ >>> remove(even, [1, 2, 3, 4]) [1, 3] """ - return filter(lambda x: not predicate(x), coll) + return [x for x in coll if not predicate(x)] --- ./theano/gof/unify.py (original) +++ ./theano/gof/unify.py (refactored) @@ -35,7 +35,7 @@ def __init__(self, name = "?"): self.name = name def __str__(self): - return self.__class__.__name__ + "(" + ", ".join(["%s=%s" % (key, value) for key, value in self.__dict__.items()]) + ")" + return self.__class__.__name__ + "(" + ", ".join(["%s=%s" % (key, value) for key, value in list(self.__dict__.items())]) + ")" def __repr__(self): return str(self) @@ -129,7 +129,7 @@ else: # Copy all the unification data. U = Unification(self.inplace) - for var, (best, pool) in self.unif.items(): + for var, (best, pool) in list(selRefactoringTool: No changes to ./theano/gof/type.py RefactoringTool: Refactored ./theano/gof/toolbox.py f.unif.items()): # The pool of a variable is the set of all the variables that # are unified to it (all the variables that must have the same # value). The best is the Variable that represents a set of @@ -313,8 +313,8 @@ """ Tries to unify values of corresponding keys. 
""" - for (k1, v1) in d1.items(): - if d2.has_key(k1): + for (k1, v1) in list(d1.items()): + if k1 in d2: U = unify_walk(v1, d2[k1], U) if U is False: return False @@ -384,13 +384,13 @@ @comm_guard(dict, dict) def unify_merge(d1, d2, U): d = d1.__class__() - for k1, v1 in d1.items(): - if d2.has_key(k1): + for k1, v1 in list(d1.items()): + if k1 in d2: d[k1] = unify_merge(v1, d2[k1], U) else: d[k1] = unify_merge(v1, v1, U) - for k2, v2 in d2.items(): - if not d1.has_key(k2): + for k2, v2 in list(d2.items()): + if k2 not in d1: d[k2] = unify_merge(v2, v2, U) return d @@ -461,15 +461,15 @@ U = unify_walk(pattern1, pattern2, Unification()) if U: - print U[va] - print U[vx] - print U[vy] - print U[vz] - print unify_merge(pattern1, pattern2, U) - else: - print "no match" + print(U[va]) + print(U[vx]) + print(U[vy]) + print(U[vz]) + print(unify_merge(pattern1, pattern2, U)) + else: + print("no match") U = unify_walk((1, 2), (va, va), Unification()) - print U[va] - + print(U[va]) + --- ./theano/gof/toolbox.py (original) +++ ./theano/gof/toolbox.py (refactored) @@ -206,23 +206,23 @@ for r, new_r in replacements: try: fgraph.replace(r, new_r, reason=reason, verbose=False) - except Exception, e: + except Exception as e: if ('The type of the replacement must be the same' not in str(e) and 'does not belong to this FunctionGraph' not in str(e)): out = sys.stderr - print >> out, "<>", - print >> out, type(e), e, reason + print("<>", end=' ', file=out) + print(type(e), e, reason, file=out) # this might fail if the error is in a listener: # (fgraph.replace kinda needs better internal error handling) fgraph.revert(chk) raise try: fgraph.validate() - except Exception, e: + except Exception as e: fgraph.revert(chk) raise if verbose: - print reason, r, new_r + print(reason, r, new_r) return chk def replace_all_validate_remove(self, fgraph, replacements, @@ -237,14 +237,14 @@ fgraph.revert(chk) if warn: out = sys.stderr - print >> out, ( + print(( "WARNING: An optimization wanted to replace a Variable" " in the graph, but the replacement for it doesn't" " remove it. We disabled the optimization." 
" Your function runs correctly, but it would be" " appreciated if you submit this problem to the" - " mailing list theano-users so that we can fix it.") - print >> out, reason, replacements + " mailing list theano-users so that we can fix it."), file=out) + print(reason, replacements, file=out) raise ReplacementDidntRemovedError() @@ -277,12 +277,12 @@ self.d.setdefault(node.op, []).append(node) except TypeError: # node.op is unhashable return - except Exception, e: - print >> sys.stderr, 'OFFENDING node', type(node), typeRefactoringTool: Refactored ./theano/gof/tests/test_vm.py (node.op) + except Exception as e: + print('OFFENDING node', type(node), type(node.op), file=sys.stderr) try: - print >> sys.stderr, 'OFFENDING node hash', hash(node.op) + print('OFFENDING node hash', hash(node.op), file=sys.stderr) except Exception: - print >> sys.stderr, 'OFFENDING node not hashable' + print('OFFENDING node not hashable', file=sys.stderr) raise e def on_prune(self, fgraph, node, reason): @@ -311,24 +311,24 @@ def on_attach(self, fgraph): if self.active: - print "-- attaching to: ", fgraph - - def on_detach(self, fgraph): - if self.active: - print "-- detaching from: ", fgraph + print("-- attaching to: ", fgraph) + + def on_detach(self, fgraph): + if self.active: + print("-- detaching from: ", fgraph) def on_import(self, fgraph, node, reason): if self.active: - print "-- importing: %s, reason: %s" % (node, reason) + print("-- importing: %s, reason: %s" % (node, reason)) def on_prune(self, fgraph, node, reason): if self.active: - print "-- pruning: %s, reason: %s" % (node, reason) + print("-- pruning: %s, reason: %s" % (node, reason)) def on_change_input(self, fgraph, node, i, r, new_r, reason=None): if self.active: - print "-- changing (%s.inputs[%s]) from %s to %s" % ( - node, i, r, new_r) + print("-- changing (%s.inputs[%s]) from %s to %s" % ( + node, i, r, new_r)) class PreserveNames(Feature): --- ./theano/gof/tests/test_vm.py (original) +++ ./theano/gof/tests/test_vm.py (refactored) @@ -68,7 +68,7 @@ def numpy_version(x, depth): z = x - for d in xrange(depth): + for d in range(depth): z = (z+z) return z def time_numpy(): @@ -86,9 +86,9 @@ t_a = t1 - t0 t_b = t3 - t2 - print "%s takes %f s/Kop" % ( + print("%s takes %f s/Kop" % ( 'numpy', - (1000*(t_b-t_a) / (steps_b - steps_a))) + (1000*(t_b-t_a) / (steps_b - steps_a)))) def time_linker(name, linker): steps_a = 5 @@ -121,9 +121,9 @@ t_a = t1 - t0 t_b = t3 - t2 - print "%s takes %f s/Kop" % ( + print("%s takes %f s/Kop" % ( name, - (1000*(t_b-t_a) / (steps_b - steps_a))) + (1000*(t_b-t_a) / (steps_b - steps_a)))) time_linker('c|py', OpWiseCLinker) time_linker('vmLinker', vm.VM_Linker) @@ -174,9 +174,9 @@ t_a = t1 - t0 t_b = t3 - t2 - print "%s takes %f s/Kop" % ( + print("%s takes %f s/Kop" % ( name, - (1000*(t_b-t_a) / (steps_b - steps_a))) + (1000*(t_b-t_a) / (steps_b - steps_a)))) time_linker('vmLinker', vm.VM_Linker) time_linker('vmLinker_nogc', lambda : vm.VM_Linker(allow_gc=False)) @@ -191,15 +191,15 @@ # was stable. 
def test_leak2(): import theano.sandbox.cuda as cuda - for i in xrange(1000000): + for i in range(1000000): n = numpy.asarray([2.3, 4.5], dtype='f') c = sys.getrefcount(n) a = cuda.CudaNdarray(n) assert c == sys.getrefcount(n) if not i % 1000: - print '.', - print gc.collect(), - print gc.collect() + print('.', end=' ') + print(gc.collect(), end=' ') + print(gc.collect()) sys.stdout.flush() def test_no_leak_many_graphs(): @@ -207,7 +207,7 @@ # This isn't really a unit test, you have to run it and look at top to # see if there's a leak - for i in xrange(10000): + for i in range(10000): x = tensor.vector() z = x for d in range(10): @@ -215,7 +215,7 @@ f = functionRefactoringTool: Refactored ./theano/gof/tests/test_utils.py RefactoringTool: Refactored ./theano/gof/tests/test_toolbox.py RefactoringTool: Refactored ./theano/gof/tests/test_sched.py RefactoringTool: Refactored ./theano/gof/tests/test_optdb.py RefactoringTool: Refactored ./theano/gof/tests/test_opt.py ([x], z, mode=Mode(optimizer=None, linker='cvm')) if not i % 100: - print gc.collect() + print(gc.collect()) sys.stdout.flush() gc.collect() @@ -246,15 +246,15 @@ mode=Mode(optimizer=None, linker=linker())) - for i in xrange(100000): + for i in range(100000): f_a([2.0]) if 0: # this doesn't seem to work, prints 0 for everything import resource pre = resource.getrusage(resource.RUSAGE_SELF) post = resource.getrusage(resource.RUSAGE_SELF) - print pre.ru_ixrss, post.ru_ixrss - print pre.ru_idrss, post.ru_idrss - print pre.ru_maxrss, post.ru_maxrss + print(pre.ru_ixrss, post.ru_ixrss) + print(pre.ru_idrss, post.ru_idrss) + print(pre.ru_maxrss, post.ru_maxrss) time_linker('vmLinker_C', lambda: vm.VM_Linker(allow_gc=False, use_cloop=True)) @@ -280,7 +280,7 @@ mode=Mode(optimizer=None, linker=linker())) - for i in xrange(500000): + for i in range(500000): f_a([2.0]) time_linker('vmLinker_C', --- ./theano/gof/tests/test_utils.py (original) +++ ./theano/gof/tests/test_utils.py (refactored) @@ -41,4 +41,4 @@ odd = lambda x: x % 2 == 1 # The list are neede as with python 3, remove and filter return generators # and we can't compare generators. 
- assert list(remove(even, range(5))) == list(filter(odd, range(5))) + assert list(remove(even, list(range(5)))) == list(filter(odd, list(range(5)))) --- ./theano/gof/tests/test_toolbox.py (original) +++ ./theano/gof/tests/test_toolbox.py (refactored) @@ -39,7 +39,7 @@ def make_node(self, *inputs): assert len(inputs) == self.nin - inputs = map(as_variable, inputs) + inputs = list(map(as_variable, inputs)) for input in inputs: if not isinstance(input.type, MyType): raise Exception("Error 1") --- ./theano/gof/tests/test_sched.py (original) +++ ./theano/gof/tests/test_sched.py (refactored) @@ -60,7 +60,7 @@ def test_posort(): - l = range(1, 20) + l = list(range(1, 20)) cmps = [lambda a, b: a % 10 - b % 10, lambda a, b: (a / 10) % 2 - (b / 10) % 2, lambda a, b: a - b] --- ./theano/gof/tests/test_optdb.py (original) +++ ./theano/gof/tests/test_optdb.py (refactored) @@ -21,7 +21,7 @@ try: db.register('c', Opt()) # name taken self.fail() - except ValueError, e: + except ValueError as e: if exc_message(e).startswith("The name"): pass else: @@ -32,7 +32,7 @@ try: db.register('z', Opt()) # name collides with tag self.fail() - except ValueError, e: + except ValueError as e: if exc_message(e).startswith("The name"): pass else: @@ -43,7 +43,7 @@ try: db.register('u', Opt(), 'b') # name new but tag collides with name self.fail() - except ValueError, e: + except ValueError as e: if exc_message(e).startswith("The tag"): pass else: --- ./theano/gof/tests/test_opt.py (original) +++ ./theano/gof/tests/test_opt.py (refactored) @@ -39,7 +39,7 @@ self.x = x def make_node(self, *inputs): - inputs = map(as_variable, inputs) + inputs = list(map(as_variable, inputs)) for input in inputs: if not isinstance(input.type, MyType): raise Exception("Error 1") @@ -357,7 +357,7 @@ class TestEquilibrium(object): def test_1(self): - x, y, z = map(MyVariable, 'xyz') + x, y, z = list(map(MyVariable, 'xyz')) e = op3(op4(x, y)) g = Env([x, y, z], [e]) #print g @@ -372,7 +372,7 @@RefactoringTool: Refactored ./theano/gof/tests/test_op.py RefactoringTool: Refactored ./theano/gof/tests/test_link.py RefactoringTool: Refactored ./theano/gof/tests/test_lazy.py RefactoringTool: Refactored ./theano/gof/tests/test_graph.py RefactoringTool: Refactored ./theano/gof/tests/test_destroyhandler.py assert str(g) == '[Op2(x, y)]' def test_2(self): - x, y, z = map(MyVariable, 'xyz') + x, y, z = list(map(MyVariable, 'xyz')) e = op1(op1(op3(x, y))) g = Env([x, y, z], [e]) #print g @@ -388,7 +388,7 @@ assert str(g) == '[Op2(x, y)]' def test_low_use_ratio(self): - x, y, z = map(MyVariable, 'xyz') + x, y, z = list(map(MyVariable, 'xyz')) e = op3(op4(x, y)) g = Env([x, y, z], [e]) #print 'before', g --- ./theano/gof/tests/test_op.py (original) +++ ./theano/gof/tests/test_op.py (refactored) @@ -38,7 +38,7 @@ def filter(self, x, strict=False, allow_downcast=None): # Dummy filter: we want this type to represent strings that # start with `self.thingy`. 
- if not isinstance(x, basestring): + if not isinstance(x, str): raise TypeError("Invalid type") if not x.startswith(self.thingy): raise ValueError("Invalid value") @@ -48,7 +48,7 @@ class MyOp(Op): def make_node(self, *inputs): - inputs = map(as_variable, inputs) + inputs = list(map(as_variable, inputs)) for input in inputs: if not isinstance(input.type, MyType): raise Exception("Error 1") @@ -92,7 +92,7 @@ try: MyOp(Generic()(), MyType(1)()) # MyOp requires MyType instances raise Exception("Expected an exception") - except Exception, e: + except Exception as e: if str(e) != "Error 1": raise @@ -193,7 +193,7 @@ def test_test_value_python_objects(): - for x in (range(3), 0, 0.5, 1): + for x in (list(range(3)), 0, 0.5, 1): assert (op.get_test_value(x) == x).all() --- ./theano/gof/tests/test_link.py (original) +++ ./theano/gof/tests/test_link.py (refactored) @@ -34,7 +34,7 @@ def make_node(self, *inputs): assert len(inputs) == self.nin - inputs = map(as_variable, inputs) + inputs = list(map(as_variable, inputs)) for input in inputs: if input.type is not tdouble: raise Exception("Error 1") --- ./theano/gof/tests/test_lazy.py (original) +++ ./theano/gof/tests/test_lazy.py (refactored) @@ -159,9 +159,9 @@ except NotImplementedOp.E: pass else: - print f(1, 0, numpy.array(10, dtype=x1.dtype), 0) + print(f(1, 0, numpy.array(10, dtype=x1.dtype), 0)) assert f(1, 0, numpy.array(10, dtype=x1.dtype), 0) == 20.5 - print '... passed' + print('... passed') if __name__ == '__main__': more_complex_test() --- ./theano/gof/tests/test_graph.py (original) +++ ./theano/gof/tests/test_graph.py (refactored) @@ -34,10 +34,10 @@ class MyOp(Op): def make_node(self, *inputs): - inputs = map(as_variable, inputs) + inputs = list(map(as_variable, inputs)) for input in inputs: if not isinstance(input.type, MyType): - print input, input.type, type(input), type(input.type) + print(input, input.type, type(input), type(input.type)) raise Exception("Error 1") outputs = [MyVariable(sum([input.type.thingy for input in inputs]))] return Apply(self, inputs, outputs) --- ./theano/gof/tests/test_destroyhandler.py (original) +++ ./theano/gof/tests/test_destroyhandler.py (refactored) @@ -63,11 +63,11 @@ def make_node(self, *inputs): assert len(inputs) == self.nin - inputs = map(as_variable, inputs) + inputs = list(map(as_variable, inputs)) for input in inputs: if not isinstance(input.type, MyType): raise Exception("Error 1") - outputs = [MyVariable(self.name + "_R") for i in xrange(self.nout)] + outputs = [MyVariable(self.name + "_R") for i in range(self.nout)] return Apply(self, inputs, outputs) def __str__(self): @@ -117,7 +117,7 @@ try: assert g.consistent() except AssertionError: - print "Test failed! The graph wRefactoringTool: Refactored ./theano/gof/tests/test_compute_test_value.py RefactoringTool: No changes to ./theano/gof/tests/test_cmodule.py RefactoringTool: Refactored ./theano/gof/tests/test_cc.py RefactoringTool: Refactored ./theano/gof/sched.py RefactoringTool: No changes to ./theano/gof/python25.py RefactoringTool: Refactored ./theano/gof/optdb.py as marked as NOT consistent." + print("Test failed! The graph was marked as NOT consistent.") raise #print "Test OK" @@ -127,7 +127,7 @@ try: assert not g.consistent() except AssertionError: - print "Test failed! The graph was marked as consistent." + print("Test failed! 
The graph was marked as consistent.") raise #print "Test OK" @@ -349,7 +349,7 @@ try: g = Env([x, y, z], [e]) raise Exception("Shouldn't have reached this point.") - except InconsistencyError, e: + except InconsistencyError as e: pass --- ./theano/gof/tests/test_compute_test_value.py (original) +++ ./theano/gof/tests/test_compute_test_value.py (refactored) @@ -269,7 +269,7 @@ non_sequences=A, n_steps=k) assert False - except ValueError, e: + except ValueError as e: # Get traceback tb = sys.exc_info()[2] # Get frame info 4 layers up @@ -312,7 +312,7 @@ non_sequences=A, n_steps=k) assert False - except ValueError, e: + except ValueError as e: # The first message is for numpy before 1.6. # The second is a new message in numpy 1.6. assert (str(e).startswith("shape mismatch") or --- ./theano/gof/tests/test_cc.py (original) +++ ./theano/gof/tests/test_cc.py (refactored) @@ -84,7 +84,7 @@ def make_node(self, *inputs): assert len(inputs) == self.nin - inputs = map(as_variable, inputs) + inputs = list(map(as_variable, inputs)) for input in inputs: if input.type is not tdouble: raise Exception("Error 1") @@ -310,7 +310,7 @@ # are the same. res = fn(1.0, 2.0, 3.0) raise Exception("An exception should have been raised here!") - except MyExc, e: + except MyExc as e: pass @@ -343,6 +343,6 @@ try: res = fn(1.5, 3.0) except RuntimeError: - print 'Yay, TEST PASSED' + print('Yay, TEST PASSED') return # test passed assert 0 # test failed --- ./theano/gof/sched.py (original) +++ ./theano/gof/sched.py (refactored) @@ -36,8 +36,9 @@ def make_depends(): @memodict - def depends((a, b)): + def depends(xxx_todo_changeme): """ Returns True if a depends on b """ + (a, b) = xxx_todo_changeme return (any(bout in a.inputs for bout in b.outputs) or any(depends((ainp.owner, b)) for ainp in a.inputs if ainp.owner)) @@ -103,7 +104,7 @@ [2] http://en.wikipedia.org/wiki/Toposort#Algorithms """ incoming_edges = reverse_dict(edges) - incoming_edges = dict((k, set(val)) for k, val in incoming_edges.items()) + incoming_edges = dict((k, set(val)) for k, val in list(incoming_edges.items())) S = set((v for v in edges if v not in incoming_edges)) L = [] --- ./theano/gof/optdb.py (original) +++ ./theano/gof/optdb.py (refactored) @@ -129,9 +129,9 @@ return variable def print_summary(self, stream=sys.stdout): - print >> stream, "%s (id %i)" % (self.__class__.__name__, id(self)) - print >> stream, " names", self._names - print >> stream, " db", self.__db__ + print("%s (id %i)" % (self.__class__.__name__, id(self)), file=stream) + print(" names", self._names, file=stream) + print(" db", self.__db__, file=stream) class Query(object): @@ -246,16 +246,16 @@ return ret def print_summary(self, stream=sys.stdout): - print >> stream, "SequenceDB (id %i)" % id(self) - positions = self.__position__.items() + print("SequenceDB (id %i)" % id(self), file=stream) + positions = list(self.__position__.items()) def c(a, b): return cmp(a[1], b[1]) positions.sort(c) - print >> stream, " position", positions - print >RefactoringTool: Refactored ./theano/gof/opt.py > stream, " names", self._names - print >> stream, " db", self.__db__ + print(" position", positions, file=stream) + print(" names", self._names, file=stream) + print(" db", self.__db__, file=stream) def __str__(self): sio = StringIO() --- ./theano/gof/opt.py (original) +++ ./theano/gof/opt.py (refactored) @@ -20,6 +20,7 @@ import theano from theano import config from theano.gof.python25 import any, all, deque +from functools import reduce #if sys.version_info[:2] >= (2,5): # from collections 
import defaultdict @@ -27,7 +28,7 @@ _logger = logging.getLogger('theano.gof.opt') -import destroyhandler as dh +from . import destroyhandler as dh import traceback _optimizer_idx = [0] @@ -102,8 +103,8 @@ def print_summary(self, stream=sys.stdout, level=0, depth=-1): name = getattr(self, 'name', None) - print >> stream, "%s%s %s id=%i" % ( - (' ' * level), self.__class__.__name__, name, id(self)) + print("%s%s %s id=%i" % ( + (' ' * level), self.__class__.__name__, name, id(self)), file=stream) def print_profile(self, prof): if prof is not None: @@ -123,10 +124,10 @@ pass def print_summary(self, stream=sys.stdout, level=0, depth=-1): - print >> stream, "%s%s id=%i" % ( + print("%s%s id=%i" % ( ' ' * level, str(self.apply), - id(self)) + id(self)), file=stream) def __call__(self, *args, **kwargs): return self.fn(*args, **kwargs) @@ -188,7 +189,7 @@ except AssertionError: # do not catch Assertion failures raise - except Exception, e: + except Exception as e: if self.failure_callback: self.failure_callback(e, self, optimizer) continue @@ -211,8 +212,8 @@ def print_summary(self, stream=sys.stdout, level=0, depth=-1): name = getattr(self, 'name', None) - print >> stream, "%s%s %s id=%i" % ( - (' ' * level), self.__class__.__name__, name, id(self)) + print("%s%s %s id=%i" % ( + (' ' * level), self.__class__.__name__, name, id(self)), file=stream) # This way, -1 will do all depth if depth != 0: depth -= 1 @@ -225,20 +226,18 @@ nb_node_after, sub_profs, sub_validate_time) = prof blanc = (' ' * level) - print >> stream, blanc, "SeqOptimizer", + print(blanc, "SeqOptimizer", end=' ', file=stream) if hasattr(opts, "name"): - print >> stream, blanc, opts.name, + print(blanc, opts.name, end=' ', file=stream) elif hasattr(opts, "__name__"): - print >> stream, blanc, opts.__name__, - print >> stream, (" time %.3fs for %d/%d nodes" + print(blanc, opts.__name__, end=' ', file=stream) + print((" time %.3fs for %d/%d nodes" " before/after optimization" % ( - sum(prof), nb_node_before, nb_node_after)) - print >> stream, \ - blanc, " %.3fs for fgraph.validate()" % (validate_time) - print >> stream, \ - blanc, " %.3fs for callback" % (callback_time) + sum(prof), nb_node_before, nb_node_after)), file=stream) + print(blanc, " %.3fs for fgraph.validate()" % (validate_time), file=stream) + print(blanc, " %.3fs for callback" % (callback_time), file=stream) if level == 0: - print >> stream, blanc, " time - (name, class, index) - validate time" + print(blanc, " time - (name, class, index) - validate time", file=stream) ll = [] for opt in opts: if hasattr(opt, "__name__"): @@ -247,7 +246,7 @@ else: ll.append((opt.name, opt.__class__.__name__, opts.index(opt))) - lll = zip(prof, ll) + lll = list(zip(prof, ll)) def cmp(a, b): if a[0] == b[0]: @@ -263,15 +262,15 @@ if sub_validate_time: i = opt[-1] val_time = sub_validate_time[i + 1] - sub_validate_time[i] - print >> stream, blanc, ' %.6fs - %s - %.3fs' % ( - t, opt, val_time) + print(blanc, ' %.6fs - %s - %.3fs' % ( + t, opt, val_time), file=stream) else: - print >> stream, blanc, ' %.6fs - %s' % (t, opt) + print(blanc, ' %.6fs - %s' % (t, opt), file=stream) if sub_profs[opt[-1]]: opts[opt[-1]].print_profile(stream, sub_profs[opt[-1]], level=level + 1) - print >> stream + print(file=stream) @staticmethod def merge_profile(prof1, prof2): @@ -522,7 +521,7 @@ continue # Schedule transfer of clients from node to candidate - pairs = zip(node.outputs, candidate.outputs) + pairs = list(zip(node.outputs, candidate.outputs)) #transfer names for node_output, cand_output in pairs: 
@@ -593,7 +592,7 @@ validate_time = fgraph.profile.validate_time - validate_before callback_time = fgraph.execute_callbacks_time - callback_before callbacks_time = {} - for k, v in fgraph.execute_callbacks_times.iteritems(): + for k, v in fgraph.execute_callbacks_times.items(): if k in callbacks_before: callbacks_time[k] = v - callbacks_before[k] else: @@ -616,14 +615,14 @@ callback_time, callbacks_time, nb_merged, nb_constant) = prof blanc = (' ' * level) - print >> stream, blanc, "MergeOptimizer" - print >> stream, blanc, " nb_fail", nb_fail - print >> stream, blanc, " replace_time", replace_time - print >> stream, blanc, " validate_time", validate_time - print >> stream, blanc, " callback_time", callback_time - print >> stream, blanc, " callback_times", callbacks_time - print >> stream, blanc, " nb_merged", nb_merged - print >> stream, blanc, " nb_constant", nb_constant + print(blanc, "MergeOptimizer", file=stream) + print(blanc, " nb_fail", nb_fail, file=stream) + print(blanc, " replace_time", replace_time, file=stream) + print(blanc, " validate_time", validate_time, file=stream) + print(blanc, " callback_time", callback_time, file=stream) + print(blanc, " callback_times", callbacks_time, file=stream) + print(blanc, " nb_merged", nb_merged, file=stream) + print(blanc, " nb_constant", nb_constant, file=stream) merge_optimizer = MergeOptimizer() @@ -647,7 +646,7 @@ # break the mapping in givens. fgraph = theano.gof.fg.FunctionGraph(inputs, vars, clone=False) # Perform Variable substitution. - for to_replace, replace_by in givens.iteritems(): + for to_replace, replace_by in givens.items(): fgraph.replace(to_replace, replace_by) # Perform merge optimization. merge_optimizer.optimize(fgraph) @@ -716,7 +715,7 @@ var.owner.inputs[idx] = recursive_merge(inp) return var - return map(recursive_merge, vars) + return list(map(recursive_merge, vars)) ######################## @@ -765,8 +764,8 @@ pass def print_summary(self, stream=sys.stdout, level=0, depth=-1): - print >> stream, "%s%s id=%i" % ( - (' ' * level), self.__class__.__name__, id(self)) + print("%s%s id=%i" % ( + (' ' * level), self.__class__.__name__, id(self)), file=stream) class FromFunctionLocalOptimizer(LocalOptimizer): @@ -785,10 +784,10 @@ '') def print_summary(self, stream=sys.stdout, level=0, depth=-1): - print >> stream, "%s%s id=%i" % ( + print("%s%s id=%i" % ( ' ' * level, str(self.transform), - id(self)) + id(self)), file=stream) def local_optimizer(*tracks): @@ -822,8 +821,8 @@ return repl def print_summary(self, stream=sys.stdout, level=0, depth=-1): - print >> stream, "%s%s id=%i" % ( - (' ' * level), self.__class__.__name__, id(self)) + print("%s%s id=%i" % ( + (' ' * level), self.__class__.__name__, id(self)), file=stream) if depth != 0: depth -= 1 for lopt in self.opts: @@ -912,11 +911,11 @@ return "%s(x) -> x" % (self.op) def print_summary(self, stream=sys.stdout, level=0, depth=-1): - print >> stream, "%s%s(%s) id=%i" % ( + print("%s%s(%s) id=%i" % ( ' ' * level, self.__class__.__name__, str(self.op), - id(self)) + id(self)), file=stream) class PatternSub(LocalOptimizer): @@ -1065,7 +1064,7 @@ allow_multiple_clients)) else: return retry_with_equiv() - elif isinstance(pattern, basestring): + elif isinstance(pattern, str): v = unify.Var(pattern) if u[v] is not v and u[v] is not expr: return retry_with_equiv() @@ -1093,7 +1092,7 @@ if isinstance(pattern, (list, tuple)): args = [build(p, u) for p in pattern[1:]] return pattern[0](*args) - elif isinstance(pattern, basestring): + elif isinstance(pattern, str): return 
u[unify.Var(pattern)] elif isinstance(pattern, (int, float)): return pattern @@ -1133,13 +1132,13 @@ def print_summary(self, stream=sys.stdout, level=0, depth=-1): name = getattr(self, '__name__', getattr(self, 'name', None)) - print >> stream, "%s%s %s(%s, %s) id=%i" % ( + print("%s%s %s(%s, %s) id=%i" % ( ' ' * level, self.__class__.__name__, name, str(self.in_pattern), str(self.out_pattern), - id(self)) + id(self)), file=stream) ################## @@ -1284,7 +1283,7 @@ lopt = lopt or self.local_opt try: replacements = lopt.transform(node) - except Exception, e: + except Exception as e: if self.failure_callback is not None: self.failure_callback(e, self, [(x, None) for x in node.outputs], @@ -1315,7 +1314,7 @@ try: fgraph.replace_all_validate(repl_pairs, reason=lopt) return True - except Exception, e: + except Exception as e: # This means the replacements were rejected by the fgraph. # # This is not supposed to happen. The default failure_callback @@ -1334,8 +1333,8 @@ self.local_opt.add_requirements(fgraph) def print_summary(self, stream=sys.stdout, level=0, depth=-1): - print >> stream, "%s%s (%i)" % ( - (' ' * level), self.__class__.__name__, id(self)) + print("%s%s (%i)" % ( + (' ' * level), self.__class__.__name__, id(self)), file=stream) if depth != 0: self.local_opt.print_summary(stream, level=(level + 2), depth=(depth - 1)) @@ -1400,12 +1399,12 @@ io_t, loop_t, callback_time) = prof blanc = (' ' * level) - print >> stream, blanc, "TopoOptimizer" - print >> stream, blanc, " nb_node (start, end, changed)", ( - nb_nodes_start, nb_nodes_end, nb) - print >> stream, blanc, " init io_toposort", io_t - print >> stream, blanc, " loop time", loop_t - print >> stream, blanc, " callback_time", callback_time + print(blanc, "TopoOptimizer", file=stream) + print(blanc, " nb_node (start, end, changed)", ( + nb_nodes_start, nb_nodes_end, nb), file=stream) + print(blanc, " init io_toposort", io_t, file=stream) + print(blanc, " loop time", loop_t, file=stream) + print(blanc, " callback_time", callback_time, file=stream) def __str__(self): return getattr(self, '__name__', @@ -1426,7 +1425,7 @@ def apply(self, fgraph): op = self.local_opt.op_key() if isinstance(op, (list, tuple)): - q = reduce(list.__iadd__, map(fgraph.get_nodes, op)) + q = reduce(list.__iadd__, list(map(fgraph.get_nodes, op))) else: q = list(fgraph.get_nodes(op)) @@ -1631,8 +1630,8 @@ def print_summary(self, stream=sys.stdout, level=0, depth=-1): name = getattr(self, 'name', None) - print >> stream, "%s%s %s id=%i" % ( - (' ' * level), self.__class__.__name__, name, id(self)) + print("%s%s %s id=%i" % ( + (' ' * level), self.__class__.__name__, name, id(self)), file=stream) if depth != 0: for lopt in self.local_optimizers: lopt.print_summary(stream, level=(level + 2), @@ -1645,35 +1644,35 @@ global_opt_timing, nb_nodes, time_opts, io_toposort_timing) = prof blanc = (' ' * level) - print >> stream, blanc, "EquilibriumOptimizer", - print >> stream, blanc, getattr(opt, "name", - getattr(opt, "__name__", "")) - print >> stream, blanc, " time %.3fs for %d passes" % ( - sum(loop_timing), len(loop_timing)) - print >> stream, blanc, " nb nodes (start, end, max) %d %d %d" % ( - start_nb_nodes, end_nb_nodes, max_nb_nodes) - print >> stream, blanc, " time io_toposort %.3fs" % sum( - io_toposort_timing) + print(blanc, "EquilibriumOptimizer", end=' ', file=stream) + print(blanc, getattr(opt, "name", + getattr(opt, "__name__", "")), file=stream) + print(blanc, " time %.3fs for %d passes" % ( + sum(loop_timing), len(loop_timing)), file=stream) + 
print(blanc, " nb nodes (start, end, max) %d %d %d" % ( + start_nb_nodes, end_nb_nodes, max_nb_nodes), file=stream) + print(blanc, " time io_toposort %.3fs" % sum( + io_toposort_timing), file=stream) s = sum([time_opts[o] for o in opt.local_optimizers]) - print >> stream, blanc, " time in local optimizers %.3fs" % s + print(blanc, " time in local optimizers %.3fs" % s, file=stream) s = sum([time_opts[o] for o in opt.global_optimizers]) - print >> stream, blanc, " time in global optimizers %.3fs" % s + print(blanc, " time in global optimizers %.3fs" % s, file=stream) for i in range(len(loop_timing)): lopt = "" if loop_process_count[i]: - d = list(reversed(sorted(loop_process_count[i].iteritems(), + d = list(reversed(sorted(iter(loop_process_count[i].items()), key=lambda a: a[1]))) lopt = " ".join([str((str(k), v)) for k, v in d[:5]]) if len(d) > 5: lopt += " ..." - print >> stream, blanc, (' %2d - %.3fs %d (%.3fs in global opts, ' + print(blanc, (' %2d - %.3fs %d (%.3fs in global opts, ' '%.3fs io_toposort) - %d nodes - %s' % ( i, loop_timing[i], sum(loop_process_count[i].values()), global_opt_timing[i], io_toposort_timing[i], nb_nodes[i], - lopt)) + lopt)), file=stream) count_opt = [] not_used = 0 @@ -1682,9 +1681,9 @@ for o in opt.global_RefactoringTool: Refactored ./theano/gof/op.py RefactoringTool: No changes to ./theano/gof/null_type.py RefactoringTool: Refactored ./theano/gof/link.py optimizers + opt.local_optimizers: process_count.setdefault(o, 0) for count in loop_process_count: - for o, v in count.iteritems(): + for o, v in count.items(): process_count[o] += v - for opt, count in process_count.iteritems(): + for opt, count in process_count.items(): if count > 0: count_opt.append((time_opts[opt], count, opt)) else: @@ -1692,15 +1691,15 @@ not_used_time += time_opts[opt] if count_opt: - print >> stream, blanc, \ - ' times - times applied - name:' + print(blanc, \ + ' times - times applied - name:', file=stream) count_opt.sort() for (t, count, opt) in count_opt[::-1]: - print >> stream, blanc, ' %.3fs - %d - %s' % ( - t, count, opt) - print >> stream, blanc, ' %.3fs - in %d optimization that where not used' % ( - not_used_time, not_used) - print >> stream + print(blanc, ' %.3fs - %d - %s' % ( + t, count, opt), file=stream) + print(blanc, ' %.3fs - in %d optimization that where not used' % ( + not_used_time, not_used), file=stream) + print(file=stream) @staticmethod def merge_profile(prof1, prof2): @@ -1729,7 +1728,7 @@ loop_process_count = prof1[2].copy() for i in range(len(loop_process_count)): process_count = loop_process_count[i] - for process, count in prof2[2][i].iteritems(): + for process, count in prof2[2][i].items(): if process in process_count: process_count[process] += count else: @@ -1744,7 +1743,7 @@ nb_nodes = merge_list(prof1[5], prof2[5]) time_opts = prof1[6].copy() - for opt, t in prof2[6].iteritems(): + for opt, t in prof2[6].items(): if opt in time_opts: time_opts[opt] += t else: --- ./theano/gof/op.py (original) +++ ./theano/gof/op.py (refactored) @@ -432,7 +432,7 @@ # copy the values of the inputs in destroy_map destroyed_inputs_idx = set() if getattr(node.op, 'destroy_map', None): - for i_pos_list in node.op.destroy_map.itervalues(): + for i_pos_list in node.op.destroy_map.values(): destroyed_inputs_idx.update(i_pos_list) for inp_idx in destroyed_inputs_idx: inp = node.inputs[inp_idx] --- ./theano/gof/link.py (original) +++ ./theano/gof/link.py (refactored) @@ -1,6 +1,6 @@ """WRITEME""" from copy import copy -import StringIO +import io import sys import traceback 
@@ -19,7 +19,7 @@ # in future, consider accepting `write` as arg rather than file # to support writing to a logger def write(msg): - print >> f, "log_thunk_trace: %s" % msg.strip() + print("log_thunk_trace: %s" % msg.strip(), file=f) if hasattr(value, '__thunk_trace__'): trace2 = value.__thunk_trace__ @@ -92,7 +92,7 @@ exc_type, exc_value, exc_trace = exc_info if exc_type == KeyboardInterrupt: # print a simple traceback from KeyboardInterrupt - raise exc_type, exc_value, exc_trace + raise exc_type(exc_value).with_traceback(exc_trace) try: trace = op.tag.trace except AttributeError: @@ -134,7 +134,7 @@ " have the inputs shapes and strides printed.") if theano.config.exception_verbosity == 'high': - f = StringIO.StringIO() + f = io.StringIO() theano.printing.debugprint(op, file=f, stop_on_name=True, print_type=True) detailed_err_msg += "\nDebugprint of the apply node: \n" + f.getvalue() @@ -143,7 +143,7 @@ " for a debugprint of this apply node.") exc_value = exc_type(RefactoringTool: Refactored ./theano/gof/lazylinker_c.py RefactoringTool: Refactored ./theano/gof/graph.py str(exc_value) + detailed_err_msg) - raise exc_type, exc_value, exc_trace + raise exc_type(exc_value).with_traceback(exc_trace) raise_with_op.print_thunk_trace = False @@ -277,7 +277,7 @@ else: self.storage[0] = self.type.filter(value, **kwargs) - except Exception, e: + except Exception as e: e.args = e.args + (('Container name "%s"' % self.name),) raise data = property(__get__, __set__) @@ -398,7 +398,7 @@ raise_with_op(node, thunk) f = streamline_default_f elif nice_errors: - thunk_node_list = zip(thunks, order) + thunk_node_list = list(zip(thunks, order)) def streamline_nice_errors_f(): for x in no_recycling: @@ -551,7 +551,7 @@ if no_recycling is True: # True seems like some special code for *everything*?? 
-JB # FunctionMaker always passes a list I think -JB - no_recycling = storage_map.values() + no_recycling = list(storage_map.values()) no_recycling = utils.difference(no_recycling, input_storage) else: no_recycling = [storage_map[r] for r in no_recycling if r not in fgraph.inputs] @@ -667,7 +667,7 @@ make_all += [l.make_all(**kwargs) for l in self.linkers[1:]] fns, input_lists, output_lists, thunk_lists, order_lists \ - = zip(*make_all) + = list(zip(*make_all)) order_list0 = order_lists[0] for order_list in order_lists[1:]: @@ -678,7 +678,7 @@ inputs0 = input_lists[0] outputs0 = output_lists[0] - thunk_groups = zip(*thunk_lists) + thunk_groups = list(zip(*thunk_lists)) order = [x[0] for x in zip(*order_lists)] to_reset = [] --- ./theano/gof/lazylinker_c.py (original) +++ ./theano/gof/lazylinker_c.py (refactored) @@ -36,7 +36,7 @@ try: # Try to make the location os.mkdir(location) - except OSError, e: + except OSError as e: # If we get an error, verify that the error was # 17, the path already exists, # and that it is a directory # Note: we can't check if it exists before making it, because we are not holding --- ./theano/gof/graph.py (original) +++ ./theano/gof/graph.py (refactored) @@ -336,7 +336,7 @@ if index is not None and not isinstance(index, int): raise TypeError("index must be an int", index) self.index = index - if name is not None and not isinstance(name, basestring): + if name is not None and not isinstance(name, str): raise TypeError("name must be a string", name) self.name = name @@ -416,7 +416,7 @@ inputs_to_values = {} if not hasattr(self, '_fn'): - self._fn_inputs = inputs_to_values.keys() + self._fn_inputs = list(inputs_to_values.keys()) self._fn = theano.function(self._fn_inputs, self) args = [inputs_to_values[param] for param in self._fn_inputs] @@ -764,9 +764,9 @@ if len(rlist) != len(reachable): if debug_print: - print '' - print reachable - print rlist + print('') + print(reachable) + print(rlist) raise ValueError('graph contains cycles') return rlist @@ -879,7 +879,7 @@ in_xs = [] in_ys = [] # Compute the sets of all variables found in each computational graph. - inputs_var = map(inputs, ([var1], [var2])) + inputs_var = list(map(inputs, ([var1], [var2]))) all_vars = [set(variables(v_i, v_o)) for v_i, v_o in ((inputs_var[0], [var1]), (inputs_var[1], [var2]))] @@ -888,7 +888,7 @@ # Return True iff `x` is in computation graph of variable `vark`. return x in all_vars[k - 1] - for to_replace, replace_by in givens.iteritems(): + for to_replace, replace_by in givenRefactoringTool: Refactored ./theano/gof/fg.py s.items(): # Map a substitution variable to the computational graphs it # belongs to. inside = dict((v, [in_var(v, k) for k in (1, 2)]) @@ -1017,7 +1017,7 @@ if owner is not None: try: view_map = owner.op.view_map - view_map = dict([(owner.outputs[o], i) for o, i in view_map.items()]) + view_map = dict([(owner.outputs[o], i) for o, i in list(view_map.items())]) except AttributeError: return [r] if r in view_map: --- ./theano/gof/fg.py (original) +++ ./theano/gof/fg.py (refactored) @@ -155,13 +155,13 @@ raise Exception("%s is already owned by another fgraph" % node) if (hasattr(node.op, 'view_map') and not all([isinstance(view, (list, tuple)) - for view in node.op.view_map.values()])): + for view in list(node.op.view_map.values())])): raise Exception("Op '%s' have a bad view map '%s'," " the values must be tuples or lists." 
% ( str(node.op), str(node.op.view_map))) if (hasattr(node.op, 'destroy_map') and not all([isinstance(destroy, (list, tuple)) - for destroy in node.op.destroy_map.values()])): + for destroy in list(node.op.destroy_map.values())])): raise Exception("Op '%s' have a bad destroy map '%s'," " the values must be tuples or lists." % ( str(node.op), str(node.op.destroy_map))) @@ -209,11 +209,11 @@ Updates the list of clients of r with new_clients. """ if set(r.clients).intersection(set(new_clients)): - print >> sys.stderr, 'ERROR: clients intersect!' - print >> sys.stderr, ' RCLIENTS of', r, [(n, i, type(n), id(n)) - for n, i in r.clients] - print >> sys.stderr, ' NCLIENTS of', r, [(n, i, type(n), id(n)) - for n, i in new_clients] + print('ERROR: clients intersect!', file=sys.stderr) + print(' RCLIENTS of', r, [(n, i, type(n), id(n)) + for n, i in r.clients], file=sys.stderr) + print(' NCLIENTS of', r, [(n, i, type(n), id(n)) + for n, i in new_clients], file=sys.stderr) assert not set(r.clients).intersection(set(new_clients)) r.clients += new_clients @@ -228,9 +228,9 @@ for entry in clients_to_remove: r.clients.remove(entry) if entry in r.clients: - print >> sys.stderr, 'ERROR: DUPLICATE CLIENT ENTRY...' - print >> sys.stderr, ' ENTRY', repr(entry), type(entry[0]) - print >> sys.stderr, ' CLIENTS', repr(r.clients) + print('ERROR: DUPLICATE CLIENT ENTRY...', file=sys.stderr) + print(' ENTRY', repr(entry), type(entry[0]), file=sys.stderr) + print(' CLIENTS', repr(r.clients), file=sys.stderr) assert entry not in r.clients # an op,i pair should be unique if not r.clients: if prune: @@ -243,7 +243,7 @@ def __import_r__(self, variables, reason): global NullType if NullType is None: - from null_type import NullType + from .null_type import NullType # Imports the owners of the variables for apply_node in [r.owner for r in variables if r.owner is not None]: if apply_node not in self.apply_nodes: @@ -442,7 +442,7 @@ if verbose is None: verbose = config.optimizer_verbose if verbose: - print reason, r, new_r + print(reason, r, new_r) if r.fgraph is not self: raise Exception("Cannot replace %s because it does not belong to this FunctionGraph" % r, str(reason)) if not r.type == new_r.RefactoringTool: Refactored ./theano/gof/destroyhandler.py type: @@ -620,7 +620,7 @@ str(feature.orderings) + ". Nondeterministic object is " + str(orderings)) - for node, prereqs in orderings.items(): + for node, prereqs in list(orderings.items()): if not isinstance(prereqs, (list, OrderedSet)): raise TypeError( "prereqs must be a type with a " @@ -628,7 +628,7 @@ " will be non-deterministic.") ords.setdefault(node, []).extend(prereqs) # eliminate duplicate prereqs - for (node, prereqs) in ords.items(): + for (node, prereqs) in list(ords.items()): ords[node] = list(OrderedSet(prereqs)) return ords --- ./theano/gof/destroyhandler.py (original) +++ ./theano/gof/destroyhandler.py (refactored) @@ -3,13 +3,13 @@ and inplace operations. """ import theano -import toolbox -import graph +from . import toolbox +from . 
import graph from theano.gof.python25 import deque from theano.gof.python25 import OrderedDict from theano.misc.ordered_set import OrderedSet -from fg import InconsistencyError +from .fg import InconsistencyError class ProtocolError(Exception): """Raised when FunctionGraph calls DestroyHandler callbacks in @@ -345,7 +345,7 @@ root_destroyer = {} # root -> destroyer apply for app in self.destroyers: - for output_idx, input_idx_list in app.op.destroy_map.items(): + for output_idx, input_idx_list in list(app.op.destroy_map.items()): if len(input_idx_list) != 1: raise NotImplementedError() input_idx = input_idx_list[0] @@ -392,7 +392,7 @@ self.destroyers.add(app) # add this symbol to the forward and backward maps - for o_idx, i_idx_list in getattr(app.op, 'view_map', {}).items(): + for o_idx, i_idx_list in list(getattr(app.op, 'view_map', {}).items()): if len(i_idx_list) > 1: raise NotImplementedError('destroying this output invalidates multiple inputs', (app.op)) o = app.outputs[o_idx] @@ -427,7 +427,7 @@ # deleted on_detach(). #UPDATE self.view_i, self.view_o - for o_idx, i_idx_list in getattr(app.op, 'view_map', {}).items(): + for o_idx, i_idx_list in list(getattr(app.op, 'view_map', {}).items()): if len(i_idx_list) > 1: #destroying this output invalidates multiple inputs raise NotImplementedError() @@ -460,7 +460,7 @@ self.clients[new_r][app] += 1 #UPDATE self.view_i, self.view_o - for o_idx, i_idx_list in getattr(app.op, 'view_map', {}).items(): + for o_idx, i_idx_list in list(getattr(app.op, 'view_map', {}).items()): if len(i_idx_list) > 1: #destroying this output invalidates multiple inputs raise NotImplementedError() @@ -529,7 +529,7 @@ # add destroyed variable clients as computational dependencies for app in self.destroyers: # for each destroyed input... - for output_idx, input_idx_list in app.op.destroy_map.items(): + for output_idx, input_idx_list in list(app.op.destroy_map.items()): destroyed_idx = input_idx_list[0] destroyed_variable = app.inputs[destroyed_idx] root = droot[destroyed_variable] @@ -592,8 +592,8 @@ # depend on destroyed_input root_clients = OrderedSet() for r in root_impact: - assert not [a for a,c in self.clients[r].items() if not c] - root_clients.update([a for a,RefactoringTool: Refactored ./theano/gof/cutils.py RefactoringTool: Refactored ./theano/gof/compilelock.py c in self.clients[r].items() if c]) + assert not [a for a,c in list(self.clients[r].items()) if not c] + root_clients.update([a for a,c in list(self.clients[r].items()) if c]) root_clients.remove(app) if root_clients: rval[app] = root_clients @@ -731,7 +731,7 @@ root_destroyer = OrderedDict() # root -> destroyer apply for app in self.destroyers: - for output_idx, input_idx_list in app.op.destroy_map.items(): + for output_idx, input_idx_list in list(app.op.destroy_map.items()): if len(input_idx_list) != 1: raise NotImplementedError() input_idx = input_idx_list[0] @@ -777,7 +777,7 @@ self.destroyers.add(app) # add this symbol to the forward and backward maps - for o_idx, i_idx_list in getattr(app.op, 'view_map', OrderedDict()).items(): + for o_idx, i_idx_list in list(getattr(app.op, 'view_map', OrderedDict()).items()): if len(i_idx_list) > 1: raise NotImplementedError('destroying this output invalidates multiple inputs', (app.op)) o = app.outputs[o_idx] @@ -812,7 +812,7 @@ # deleted on_detach(). 
#UPDATE self.view_i, self.view_o - for o_idx, i_idx_list in getattr(app.op, 'view_map', OrderedDict()).items(): + for o_idx, i_idx_list in list(getattr(app.op, 'view_map', OrderedDict()).items()): if len(i_idx_list) > 1: #destroying this output invalidates multiple inputs raise NotImplementedError() @@ -845,7 +845,7 @@ self.clients[new_r][app] += 1 #UPDATE self.view_i, self.view_o - for o_idx, i_idx_list in getattr(app.op, 'view_map', OrderedDict()).items(): + for o_idx, i_idx_list in list(getattr(app.op, 'view_map', OrderedDict()).items()): if len(i_idx_list) > 1: #destroying this output invalidates multiple inputs raise NotImplementedError() @@ -913,7 +913,7 @@ # add destroyed variable clients as computational dependencies for app in self.destroyers: # for each destroyed input... - for output_idx, input_idx_list in app.op.destroy_map.items(): + for output_idx, input_idx_list in list(app.op.destroy_map.items()): destroyed_idx = input_idx_list[0] destroyed_variable = app.inputs[destroyed_idx] root = droot[destroyed_variable] @@ -976,8 +976,8 @@ # depend on destroyed_input root_clients = OrderedSet() for r in root_impact: - assert not [a for a, c in self.clients[r].items() if not c] - root_clients.update([a for a, c in self.clients[r].items() if c]) + assert not [a for a, c in list(self.clients[r].items()) if not c] + root_clients.update([a for a, c in list(self.clients[r].items()) if c]) root_clients.remove(app) if root_clients: rval[app] = root_clients --- ./theano/gof/cutils.py (original) +++ ./theano/gof/cutils.py (refactored) @@ -4,7 +4,7 @@ from theano.compat import PY3 from theano.gof.compilelock import get_lock, release_lock from theano import config -import cmodule +from . import cmodule # TODO These two lines may be removed in the future, when we are 100% sure # noone has an old cutils_ext.so lying around anymore. --- ./theano/gof/compilelock.py (original) +++ ./theano/gof/compilelock.py (refactored) @@ -253,7 +253,7 @@ # We got the lock, hoorray! return - except Exception, e: + except Exception as e: # If something wrong happened, we try again. _logger.warning("Something wrong happened: %s %s", type(e), e) nb_error += 1 RefactoringTool: Refactored ./theano/gof/compiledir.py --- ./theano/gof/compiledir.py (original) +++ ./theano/gof/compiledir.py (refactored) @@ -1,4 +1,4 @@ -import cPickle +import pickle import errno import logging import os @@ -116,8 +116,8 @@ " or listing permissions." % path) else: try: - os.makedirs(path, 0770) # read-write-execute for user and group - except OSError, e: + os.makedirs(path, 0o770) # read-write-execute for user and group + except OSError as e: # Maybe another parallel execution of theano was trying to create # the same directory at the same time. if e.errno != errno.EEXIST: @@ -202,7 +202,7 @@ file = open(filename, 'rb') #print file try: - keydata = cPickle.load(file) + keydata = pickle.load(file) for key in list(keydata.keys): have_npy_abi_version = False have_c_compiler = False @@ -212,7 +212,7 @@ #force the removing of key have_npy_abi_version = False break - elif isinstance(obj, basestring): + elif isinstance(obj, str): if obj.startswith('NPY_ABI_VERSION=0x'): have_npy_abi_version = True elif obj.startswith('c_compiler_str='): @@ -232,7 +232,7 @@ if keydata.key_pkl != filename: keydata.key_pkl = filename keydata.remove_key(key) - except IOError, e: + except IOError as e: _logger.error( "Could not remove file '%s'.
To complete " "the clean-up, please remove manually " @@ -273,7 +273,7 @@ try: filename = os.path.join(compiledir, dir, "key.pkl") file = open(filename, 'rb') - keydata = cPickle.load(file) + keydata = pickle.load(file) ops = list(set([x for x in flatten(keydata.keys) if isinstance(x, theano.gof.Op)])) if len(ops) == 0: @@ -298,46 +298,46 @@ if file is not None: file.close() - print "List of %d compiled individual ops in this theano cache %s:" % ( - len(table), compiledir) - print "sub directory/Op/a set of the different associated Theano type" + print("List of %d compiled individual ops in this theano cache %s:" % ( + len(table), compiledir)) + print("sub directory/Op/a set of the different associated Theano type") table = sorted(table, key=lambda t: str(t[1])) table_op_class = {} for dir, op, types in table: - print dir, op, types + print(dir, op, types) table_op_class.setdefault(op.__class__, 0) table_op_class[op.__class__] += 1 - print - print ("List of %d individual compiled Op classes and " - "the number of times they got compiled" % len(table_op_class)) - table_op_class = sorted(table_op_class.iteritems(), key=lambda t: t[1]) + print() + print(("List of %d individual compiled Op classes and " + "the number of times they got compiled" % len(table_op_class))) + table_op_class = sorted(iter(table_op_class.items()), key=lambda t: t[1]) for op_class, nb in table_op_class: - print op_class, nb + print(op_class, nb) if big_key_files: big_key_files = sorted(big_key_files, key=lambda t: str(t[1])) big_total_size = sum([size for dir, size, ops in big_key_files]) - print ("There are directories with key files bigger than %d bytes " + print(("There are directorRefactoringTool: Refactored ./theano/gof/cmodule.py ies with key files bigger than %d bytes " "(they probably contain big tensor constants)" % - max_key_file_size) - print ("They use %d bytes out of %d (total size used by all key files)" - "" % (big_total_size, total_key_sizes)) + max_key_file_size)) + print(("They use %d bytes out of %d (total size used by all key files)" + "" % (big_total_size, total_key_sizes))) for dir, size, ops in big_key_files: - print dir, size, ops - - nb_keys = sorted(nb_keys.iteritems()) - print - print "Number of keys for a compiled module" - print "number of keys/number of modules with that number of keys" + print(dir, size, ops) + + nb_keys = sorted(nb_keys.items()) + print() + print("Number of keys for a compiled module") + print("number of keys/number of modules with that number of keys") for n_k, n_m in nb_keys: - print n_k, n_m - - print ("Skipped %d files that contained more than" - " 1 op (was compiled with the C linker)" % more_than_one_ops) - print ("Skipped %d files that contained 0 op " - "(are they always theano.scalar ops?)" % zeros_op) + print(n_k, n_m) + + print(("Skipped %d files that contained more than" + " 1 op (was compiled with the C linker)" % more_than_one_ops)) + print(("Skipped %d files that contained 0 op " + "(are they always theano.scalar ops?)" % zeros_op)) def compiledir_purge(): @@ -356,18 +356,18 @@ subdirs = sorted(subdirs) others = sorted(others) - print 'Base compile dir is %s' % theano.config.base_compiledir - print 'Sub-directories (possible compile caches):' + print('Base compile dir is %s' % theano.config.base_compiledir) + print('Sub-directories (possible compile caches):') for d in subdirs: - print ' %s' % d + print(' %s' % d) if not subdirs: - print ' (None)' + print(' (None)') if others: - print - print 'Other files in base_compiledir:' + print() + print('Other files 
in base_compiledir:') for f in others: - print ' %s' % f + print(' %s' % f) def basecompiledir_purge(): --- ./theano/gof/cmodule.py (original) +++ ./theano/gof/cmodule.py (refactored) @@ -1,7 +1,7 @@ """Generate and compile C modules for Python, """ import atexit -import cPickle +import pickle import logging import operator import os @@ -85,7 +85,7 @@ setattr(debug_counter, name, getattr(debug_counter, name, 0) + 1) n = getattr(debug_counter, name) if n % every == 0: - print >>sys.stderr, "debug_counter [%s]: %s" % (name, n) + print("debug_counter [%s]: %s" % (name, n), file=sys.stderr) class ExtFunction(object): @@ -146,15 +146,15 @@ self.init_blocks = [] def print_methoddef(self, stream): - print >> stream, "static PyMethodDef MyMethods[] = {" + print("static PyMethodDef MyMethods[] = {", file=stream) for f in self.functions: - print >> stream, f.method_decl(), ',' - print >> stream, "\t{NULL, NULL, 0, NULL}" - print >> stream, "};" + print(f.method_decl(), ',', file=stream) + print("\t{NULL, NULL, 0, NULL}", file=stream) + print("};", file=stream) def print_init(self, stream): if PY3: - print >> stream, """\ + print("""\ static struct PyModuleDef moduledef = {{ PyModuleDef_HEAD_INIT, "{name}", @@ -162,21 +162,21 @@ -1, MyMethods, }}; -""".format(name=self.hash_placeholder) - print >> stream, ("PyMODINIT_FUNC PyInit_%s(void) {" % - self.hash_placeholder) +""".format(name=self.hash_placeholder), file=stream) + print(("PyMODINIT_FUNC PyInit_%s(void) {" % + self.hash_placeholder), file=stream) for block in self.init_blocks: - print >> stream, ' ', block - print >> stream, " PyObject *m = PyModule_Create(&moduledef);" - print >> stream, " return m;" + print(' ', block, file=stream) + print(" PyObject *m = PyModule_Create(&moduledef);", file=stream) + print(" return m;", file=stream) else: - print >> stream, ("PyMODINIT_FUNC init%s(void){" % - self.hash_placeholder) + print(("PyMODINIT_FUNC init%s(void){" % + self.hash_placeholder), file=stream) for block in self.init_blocks: - print >> stream, ' ', block - print >> stream, ' ', ('(void) Py_InitModule("%s", MyMethods);' - % self.hash_placeholder) - print >> stream, "}" + print(' ', block, file=stream) + print(' ', ('(void) Py_InitModule("%s", MyMethods);' + % self.hash_placeholder), file=stream) + print("}", file=stream) def add_include(self, str): assert not self.finalized @@ -201,25 +201,25 @@ if not inc: continue if inc[0] == '<' or inc[0] == '"': - print >> sio, "#include", inc + print("#include", inc, file=sio) else: - print >> sio, '#include "%s"' % inc - - print >> sio, "//////////////////////" - print >> sio, "//// Support Code" - print >> sio, "//////////////////////" + print('#include "%s"' % inc, file=sio) + + print("//////////////////////", file=sio) + print("//// Support Code", file=sio) + print("//////////////////////", file=sio) for sc in self.support_code: - print >> sio, sc - - print >> sio, "//////////////////////" - print >> sio, "//// Functions" - print >> sio, "//////////////////////" + print(sc, file=sio) + + print("//////////////////////", file=sio) + print("//// Functions", file=sio) + print("//////////////////////", file=sio) for f in self.functions: - print >> sio, f.code_block - - print >> sio, "//////////////////////" - print >> sio, "//// Module init" - print >> sio, "//////////////////////" + print(f.code_block, file=sio) + + print("//////////////////////", file=sio) + print("//// Module init", file=sio) + print("//////////////////////", file=sio) self.print_methoddef(sio) self.print_init(sio) @@ -235,7 +235,7 @@ 
def list_code(self, ofile=sys.stdout): """Print out the code with line numbers to `ofile` """ for i, line in enumerate(self.code().split('\n')): - print >> ofile, ('%4i' % (i + 1)), line + print(('%4i' % (i + 1)), line, file=ofile) ofile.flush() #TODO: add_type @@ -362,10 +362,10 @@ # it changes, then the module hash should be different. # We start with the source code itself (stripping blanks might avoid # recompiling after a basic indentation fix for instance). - to_hash = map(str.strip, src_code.split('\n')) + to_hash = list(map(str.strip, src_code.split('\n'))) # Get the version part of the key (ignore if unversioned). if key[0]: - to_hash += map(str, key[0]) + to_hash += list(map(str, key[0])) c_link_key = key[1] # Currently, in order to catch potential bugs early, we are very # convervative about the structure of the key and raise an exception @@ -385,7 +385,7 @@ # This should be the C++ compilation command line parameters or the # libraries to link against. to_hash += list(key_element) - elif isinstance(key_element, basestring): + elif isinstance(key_element, str): if key_element.startswith('md5:'): # This is the md5 hash of the config options. We can stop # here. @@ -418,7 +418,7 @@ # Find the md5 hash part. c_link_key = key[1] for key_element in c_link_key[1:]: - if (isinstance(key_element, basestring) + if (isinstance(key_element, str) and key_element.startswith('md5:')): md5 = key_element[4:] break @@ -469,9 +469,9 @@ """ # Note that writing in binary mode is important under Windows. try: - cPickle.dump(self, open(self.key_pkl, 'wb'), - protocol=cPickle.HIGHEST_PROTOCOL) - except cPickle.PicklingError: + pickle.dump(self, open(self.key_pkl, 'wb'), + protocol=pickle.HIGHEST_PROTOCOL) + except pickle.PicklingError: _logger.warning("Cache leak due to unpickle-able key data %s", self.keys) os.remove(self.key_pkl) @@ -499,7 +499,7 @@ del entry_from_key[key] if do_manual_check: to_del = [] - for key, key_entry in entry_from_key.iteritems(): + for key, key_entry in entry_from_key.items(): if key_entry == entry: to_del.append(key) for key in to_del: @@ -671,7 +671,7 @@ "unpickle cache file %s", key_pkl) try: - key_data = cPickle.load(open(key_pkl, 'rb')) + key_data = pickle.load(open(key_pkl, 'rb')) except EOFError: # Happened once... not sure why (would be worth # investigating if it ever happens again). @@ -818,7 +818,7 @@ del root, dirs, files # Remove entries that are not in the filesystem. - items_copy = list(self.module_hash_to_key_data.iteritems()) + items_copy = list(self.module_hash_to_key_data.items()) for module_hash, key_data in items_copy: entry = key_data.get_entry() try: @@ -924,7 +924,7 @@ # (cannot do try / except / finally). try: location = dlimport_workdir(self.dirname) - except OSError, e: + except OSError as e: _logger.error(e) if e.errno == 31: _logger.error('There are %i files in %s', @@ -968,7 +968,7 @@ try: key_data.add_key(key, save_pkl=bool(_version)) key_broken = False - except cPickle.PicklingError: + except pickle.PicklingError: # This should only happen if we tried to save the # pickled file. assert _version @@ -1027,7 +1027,7 @@ try: key_data.save_pkl() key_broken = False - except cPickle.PicklingError: + except pickle.PicklingError: key_broken = True # Remove key from the KeyData object, to make # sure we never try to save it again. @@ -1117,7 +1117,7 @@ # Verify that when we reload the KeyData from the pickled file, the # same key can be found in it, and is not equal to more than one # other key. 
- key_data = cPickle.load(open(key_pkl, 'rb')) + key_data = pickle.load(open(key_pkl, 'rb')) found = sum(key == other_key for other_key in key_data.keys) msg = '' if found == 0: @@ -1278,7 +1278,7 @@ min_age = self.age_thresh_del_unversioned compilelock.get_lock() - all_key_datas = self.module_hash_to_key_data.values() + all_key_datas = list(self.module_hash_to_key_data.values()) try: for key_data in all_key_datas: if not key_data.keys: RefactoringTool: Refactored ./theano/gof/cc.py @@ -1382,7 +1382,7 @@ log_msg += ' (%s)' % msg _logger.log(level, '%s: %s', log_msg, parent) shutil.rmtree(parent) - except Exception, e: + except Exception as e: # If parent still exists, mark it for deletion by a future refresh() _logger.debug('In _rmtree, encountered exception: %s(%s)', type(e), e) @@ -1390,7 +1390,7 @@ try: _logger.info('placing "delete.me" in %s', parent) open(os.path.join(parent, 'delete.me'), 'w').close() - except Exception, ee: + except Exception as ee: _logger.warning("Failed to remove or mark cache directory %s " "for removal %s", parent, ee) @@ -1470,10 +1470,10 @@ for f, lib in [('libpython27.a', 'libpython 1.2'), ('libmsvcr90.a', 'mingw 4.5.2')]: if not os.path.exists(os.path.join(libdir, f)): - print ("Your Python version is from Canopy. " + + print(("Your Python version is from Canopy. " + "You need to install the package '" + lib + "' from Canopy package manager." - ) + )) python_lib_dirs.insert(0, libdir) return [libname], python_lib_dirs @@ -1823,7 +1823,7 @@ finally: os.remove(path) - except OSError, e: + except OSError as e: compilation_ok = False if not try_run: @@ -1940,10 +1940,10 @@ def print_command_line_error(): # Print command line when a problem occurred. - print >> sys.stderr, ( + print(( "Problem occurred during compilation with the " - "command line below:") - print >> sys.stderr, ' '.join(cmd) + "command line below:"), file=sys.stderr) + print(' '.join(cmd), file=sys.stderr) try: p = call_subprocess_Popen(cmd, stderr=subprocess.PIPE) @@ -1956,14 +1956,14 @@ status = p.returncode if status: - print '===============================' + print('===============================') for i, l in enumerate(src_code.split('\n')): #gcc put its messages to stderr, so we add ours now - print >> sys.stderr, '%05i\t%s' % (i + 1, l) - print '===============================' + print('%05i\t%s' % (i + 1, l), file=sys.stderr) + print('===============================') print_command_line_error() # Print errors just below the command line. - print compile_stderr + print(compile_stderr) # We replace '\n' by '. ' in the error message because when Python # prints the exception, having '\n' in the text makes it more # difficult to read. @@ -1971,7 +1971,7 @@ (status, compile_stderr.replace('\n', '. '))) elif config.cmodule.compilation_warning and compile_stderr: # Print errors just below the command line.
- print compile_stderr + print(compile_stderr) if py_module: #touch the __init__ file --- ./theano/gof/cc.py (original) +++ ./theano/gof/cc.py (refactored) @@ -6,7 +6,7 @@ from copy import copy import os import sys -from itertools import izip + import numpy @@ -628,7 +628,7 @@ pass else: # The following will be executed if the "try" block succeeds - assert isinstance(c_support_code_apply[-1], basestring), ( + assert isinstance(c_support_code_apply[-1], str), ( str(node.op) + " didn't returned a string for c_support_code_apply") @@ -637,7 +637,7 @@ behavior = op.c_code(node, name, isyms, osyms, sub) except utils.MethodNotDefined: raise NotImplementedError("%s cannot produce C code" % op) - assert isinstance(behavior, basestring), ( + assert isinstance(behavior, str), ( str(node.op) + " didn't returned a string for c_code") try: @@ -679,10 +679,10 @@ self.c_support_code_apply = c_support_code_apply if (self.init_tasks, self.tasks) != self.get_init_tasks(): - print >> sys.stderr, "init_tasks\n", self.init_tasks - print >> sys.stderr, self.get_init_tasks()[0] - print >> sys.stderr, "tasks\n", self.tasks - print >> sys.stderr, self.get_init_tasks()[1] + print("init_tasks\n", self.init_tasks, file=sys.stderr) + print(self.get_init_tasks()[0], file=sys.stderr) + print("tasks\n", self.tasks, file=sys.stderr) + print(self.get_init_tasks()[1], file=sys.stderr) assert (self.init_tasks, self.tasks) == self.get_init_tasks() # List of indices that should be ignored when passing the arguments @@ -891,9 +891,9 @@ keep_lock=keep_lock) return (thunk, [link.Container(input, storage) for input, storage in - izip(self.fgraph.inputs, input_storage)], + zip(self.fgraph.inputs, input_storage)], [link.Container(output, storage, True) for output, storage in - izip(self.fgraph.outputs, output_storage)], + zip(self.fgraph.outputs, output_storage)], error_storage) def get_init_tasks(self): @@ -1235,7 +1235,7 @@ lib_dirs=self.lib_dirs(), libs=libs, preargs=preargs) - except Exception, e: + except Exception as e: e.args += (str(self.fgraph),) raise finally: @@ -1340,26 +1340,26 @@ def instantiate_code(self, n_args): code = StringIO() struct_name = self.struct_name - print >> code, "static PyObject * instantiate(PyObject * self, PyObject *argtuple) {" - print >> code, ' assert(PyTuple_Check(argtuple));' - print >> code, ' if (%(n_args)i != PyTuple_Size(argtuple)){ ' % locals() - print >> code, ' PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected %(n_args)i, got %%i", (int)PyTuple_Size(argtuple));' % locals() - print >> code, ' return NULL;' - print >> code, ' }' - print >> code, ' %(struct_name)s* struct_ptr = new %(struct_name)s();' % locals() - print >> code, ' struct_ptr->init(', ','.join('PyTuple_GET_ITEM(argtuple, %i)' % n for n in xrange(n_args)), ');' + print("static PyObject * instantiate(PyObject * self, PyObject *argtuple) {", file=code) + print(' assert(PyTuple_Check(argtuple));', file=code) + print(' if (%(n_args)i != PyTuple_Size(argtuple)){ ' % locals(), file=code) + print(' PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected %(n_args)i, got %%i", (int)PyTuple_Size(argtuple));' % locals(), file=code) + print(' return NULL;', file=code) + print(' }', file=code) + print(' %(struct_name)s* struct_ptr = new %(struct_name)s();' % locals(), file=code) + print(' struct_ptr->init(', ','.join('PyTuple_GET_ITEM(argtuple, %i)' % n for n in range(n_args)), ');', file=code) if PY3: - print >> code, """\ + print("""\ PyObject* thunk = PyCapsule_New((void*)(&{struct_name}_executor), NULL, 
{struct_name}_destructor); if (thunk != NULL && PyCapsule_SetContext(thunk, struct_ptr) != 0) {{ PyErr_Clear(); Py_DECREF(thunk); thunk = NULL; }} -""".format(**locals()) +""".format(**locals()), file=code) else: - print >> code, ' PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&%(struct_name)s_executor), struct_ptr, %(struct_name)s_destructor);' % locals() RefactoringTool: Refactored ./theano/gof/callcache.py - print >> code, " return thunk; }" + print(' PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&%(struct_name)s_executor), struct_ptr, %(struct_name)s_destructor);' % locals(), file=code) + print(" return thunk; }", file=code) return code.getvalue() @@ -1415,12 +1415,12 @@ exc_value = exc_type(_exc_value) exc_value.__thunk_trace__ = trace except Exception: - print >> sys.stderr, ('ERROR retrieving error_storage.' ' Was the error set in the c code?'), - print >> sys.stderr, self.error_storage + print(('ERROR retrieving error_storage.' ' Was the error set in the c code?'), end=' ', file=sys.stderr) + print(self.error_storage, file=sys.stderr) raise - raise exc_type, exc_value, exc_trace + raise exc_type(exc_value).with_traceback(exc_trace) class OpWiseCLinker(link.LocalLinker): @@ -1531,7 +1531,7 @@ node == last_user[input])]) if no_recycling is True: - no_recycling = storage_map.values() + no_recycling = list(storage_map.values()) no_recycling = utils.difference(no_recycling, input_storage) else: no_recycling = [storage_map[r] @@ -1552,9 +1552,9 @@ return (f, [link.Container(input, storage) - for input, storage in izip(fgraph.inputs, input_storage)], + for input, storage in zip(fgraph.inputs, input_storage)], [link.Container(output, storage, True) - for output, storage in izip(fgraph.outputs, output_storage)], + for output, storage in zip(fgraph.outputs, output_storage)], thunks, order) @@ -1634,23 +1634,23 @@ no_recycling=no_recycling).make_all(**kwargs)) def f(): - for input1, input2 in izip(i1, i2): + for input1, input2 in zip(i1, i2): # Set the inputs to be the same in both branches. # The copy is necessary in order for inplace ops not to # interfere.
input2.storage[0] = copy(input1.storage[0]) - for thunk1, thunk2, node1, node2 in izip(thunks1, thunks2, + for thunk1, thunk2, node1, node2 in zip(thunks1, thunks2, order1, order2): - for output, storage in izip(node1.outputs, thunk1.outputs): + for output, storage in zip(node1.outputs, thunk1.outputs): if output in no_recycling: storage[0] = None - for output, storage in izip(node2.outputs, thunk2.outputs): + for output, storage in zip(node2.outputs, thunk2.outputs): if output in no_recycling: storage[0] = None try: thunk1() thunk2() - for output1, output2 in izip(thunk1.outputs, + for output1, output2 in zip(thunk1.outputs, thunk2.outputs): self.checker(output1, output2) except Exception: --- ./theano/gof/callcache.py (original) +++ ./theano/gof/callcache.py (refactored) @@ -1,4 +1,4 @@ -import cPickle, logging +import pickle, logging _logger=logging.getLogger("theano.gof.callcache") @@ -9,7 +9,7 @@ if filename is None: raise IOError('bad filename') #just goes to except f = open(filename, 'r') - self.cache = cPickle.load(f) + self.cache = pickle.load(f) f.close() except IOError: self.cache = {} @@ -21,7 +21,7 @@ #backport #filename = self.filename if filename is None else filename RefactoringTool: No changes to ./theano/gof/__init__.py RefactoringTool: No changes to ./theano/generated_version.py RefactoringTool: Refactored ./theano/configparser.py RefactoringTool: No changes to ./theano/configdefaults.py RefactoringTool: No changes to ./theano/compile/tests/test_shared.py RefactoringTool: Refactored ./theano/compile/tests/test_pfunc.py RefactoringTool: Refactored ./theano/compile/tests/test_monitormode.py f = open(filename, 'w') - cPickle.dump(self.cache, f) + pickle.dump(self.cache, f) f.close() def call(self, fn, args=(), key=None): @@ -41,5 +41,5 @@ try: if self.filename: self.persist() - except Exception, e: + except Exception as e: _logger.error('persist failed %s %s', self.filename, e) --- ./theano/configparser.py (original) +++ ./theano/configparser.py (refactored) @@ -11,6 +11,7 @@ import theano from theano.compat import configparser as ConfigParser +import collections _logger = logging.getLogger('theano.configparser') @@ -125,10 +126,10 @@ def _config_print(thing, buf): for cv in _config_var_list: - print >> buf, cv - print >> buf, " Doc: ", cv.doc - print >> buf, " Value: ", cv.__get__() - print >> buf, "" + print(cv, file=buf) + print(" Doc: ", cv.doc, file=buf) + print(" Value: ", cv.__get__(), file=buf) + print("", file=buf) def get_config_md5(): @@ -229,7 +230,7 @@ configparam.in_c_key = in_c_key # Trigger a read of the value from config files and env vars # This allow to filter wrong value from the user. - if not callable(configparam.default): + if not isinstance(configparam.default, collections.Callable): configparam.__get__() else: # We do not want to evaluate now the default value when it is a callable. 
@@ -268,7 +269,7 @@ try: val_str = fetch_val_for_key(self.fullname) except KeyError: - if callable(self.default): + if isinstance(self.default, collections.Callable): val_str = self.default() else: val_str = self.default @@ -295,7 +296,7 @@ # All options should be strings for val in self.all: - if not isinstance(val, basestring): + if not isinstance(val, str): raise ValueError('Valid values for an EnumStr parameter ' 'should be strings', val, type(val)) @@ -320,7 +321,7 @@ def filter(val): cast_val = mytype(val) - if callable(is_valid): + if isinstance(is_valid, collections.Callable): if is_valid(cast_val): return cast_val else: --- ./theano/compile/tests/test_pfunc.py (original) +++ ./theano/compile/tests/test_pfunc.py (refactored) @@ -77,7 +77,7 @@ try: pfunc([w], theano.tensor.sum(w * w)) assert False - except TypeError, e: + except TypeError as e: msg = 'Cannot use a shared variable (w) as explicit input' if str(e).find(msg) < 0: raise --- ./theano/compile/tests/test_monitormode.py (original) +++ ./theano/compile/tests/test_monitormode.py (refactored) @@ -12,10 +12,10 @@ def detect_nan(i, node, fn): for output in fn.outputs: if numpy.isnan(output[0]).any(): - print '*** NaN detected ***' + print('*** NaN detected ***') theano.printing.debugprint(node) - print 'Inputs : %s' % [input[0] for input in fn.inputs] - print 'Outputs: %s' % [output[0] for output in fn.outputs] + print('Inputs : %s' % [input[0] for input in fn.inputs]) + print('Outputs: %s' % [output[0] for output in fn.outputs]) nan_detected[0] = True break @@ -36,10 +36,10 @@ def detect_nan(i, node, fn): for output in fn.outputs: if numpy.isnan(output[0]).any(): - print '*** NaN detected ***' + print('*** NaN detected ***') theano.printing.debugprint(node) - print 'Inputs : %s' % [input[0] for input in fn.inputs] - print 'Outputs: %s' % [output[0] for output in fn.outputs] + print('Inputs : %s' RefactoringTool: Refactored ./theano/compile/tests/test_module.py % [input[0] for input in fn.inputs]) + print('Outputs: %s' % [output[0] for output in fn.outputs]) nan_detected[0] = True break @@ -65,10 +65,10 @@ def detect_nan(i, node, fn): for output in fn.outputs: if numpy.isnan(output[0]).any(): - print '*** NaN detected ***' + print('*** NaN detected ***') theano.printing.debugprint(node) - print 'Inputs : %s' % [input[0] for input in fn.inputs] - print 'Outputs: %s' % [output[0] for output in fn.outputs] + print('Inputs : %s' % [input[0] for input in fn.inputs]) + print('Outputs: %s' % [output[0] for output in fn.outputs]) nan_detected[0] = True break --- ./theano/compile/tests/test_module.py (original) +++ ./theano/compile/tests/test_module.py (refactored) @@ -3,7 +3,7 @@ __docformat__ = "restructuredtext en" -import cPickle, numpy, unittest +import pickle, numpy, unittest from theano import config from theano.compat import exc_message @@ -116,7 +116,7 @@ inst.tdy[0]['y']=-6 inst.ddx['x']['x']=-7 inst.ddy['y']['y']=-8 - for i,j in zip(get_l2(),range(len(get_l2()))): + for i,j in zip(get_l2(),list(range(len(get_l2())))): i[0]=j assert inst.x==-1 assert inst.y==-2 @@ -126,7 +126,7 @@ assert inst.tdy[0]['y']==-6 assert inst.ddx['x']['x']==-7 assert inst.ddy['y']['y']==-8 - for i,j in zip(get_l2(),range(len(get_l2()))): + for i,j in zip(get_l2(),list(range(len(get_l2())))): assert i[0]==j local_test(lambda:T.dscalar(),lambda:T.dscalar()) @@ -490,7 +490,7 @@ try: m.a = [4, 5, 6] assert False - except Exception, e: + except Exception as e: if exc_message(e).startswith("Cannot set readonly"): pass else: @@ -499,7 +499,7 @@ try: m.a[0] 
= 4 assert False - except Exception, e: + except Exception as e: if exc_message(e).startswith("Cannot set readonly"): pass else: @@ -515,7 +515,7 @@ try: m.a[0] = 4 assert False - except Exception, e: + except Exception as e: if exc_message(e).startswith("Cannot set readonly"): pass else: @@ -635,7 +635,7 @@ try: m = M.make() assert False - except ValueError, e: + except ValueError as e: if str(exc_message(e)).startswith('Variable listed in both inputs and up'): pass else: @@ -668,7 +668,7 @@ try: m.f(0, 0) assert False - except TypeError, e: + except TypeError as e: if not str(e).startswith('Tried to provide value for implicit input'): raise @@ -690,7 +690,7 @@ mode = get_mode() m = M.make(x=numpy.zeros((4,5)), y=numpy.ones((2,3)), mode=mode) - m_dup = cPickle.loads(cPickle.dumps(m, protocol=-1)) + m_dup = pickle.loads(pickle.dumps(m, protocol=-1)) assert numpy.all(m.x == m_dup.x) and numpy.all(m.y == m_dup.y) @@ -731,7 +731,7 @@ try: m.f.pickle_aliased_memory_strategy = 'warn' m.g.pickle_aliased_memory_strategy = 'warn' - m_dup = cPickle.loads(cPickle.dumps(m, protocol=-1)) + m_dup = pickle.loads(pickle.dumps(m, protocol=-1)) assert sio.getvalue().startswith('aliased relat') finally: logging.getLogger('theano.compile.function_module').removeHandler(handler) @@ -740,8 +740,8 @@ try: m.f.pickle_aliased_memory_strategy = 'raise' m.g.pickle_aliased_memory_strategy = 'raise' - m_dup = cPickle.loads(cPickle.dumps(m, protocol=-1)) - except AliasedMemoryError, e: + m_dup = pickle.loads(pickle.dumps(m, protocol=-1)) RefactoringTool: No changes to ./theano/compile/tests/test_modes.py RefactoringTool: No changes to ./theano/compile/tests/test_misc.py RefactoringTool: Refactored ./theano/compile/tests/test_inplace_opt_for_value.py RefactoringTool: Refactored ./theano/compile/tests/test_function_module.py + except AliasedMemoryError as e: return assert 0 #should have failed to pickle @@ -752,7 +752,7 @@ #m's memory is aliased differently.... m.y = m.x[1:2] - m_dup = cPickle.loads(cPickle.dumps(m, protocol=-1)) + m_dup = pickle.loads(pickle.dumps(m, protocol=-1)) if 0: #is m_dup's memory aliased the same way?
--- ./theano/compile/tests/test_inplace_opt_for_value.py (original) +++ ./theano/compile/tests/test_inplace_opt_for_value.py (refactored) @@ -112,7 +112,7 @@ T,M = x.shape z = N.zeros((T+1, M)) z[0] = z0 - for i in xrange(T): + for i in range(T): z[i+1] = N.tanh(N.dot(z[i], A) + x[i]) out[0][0] = z @@ -145,7 +145,7 @@ T = Tp1 - 1 gx = N.zeros((T, M)) - for i in xrange(T-1, -1, -1): + for i in range(T-1, -1, -1): #back through the tanh gx[i] = gz[i+1] * (1.0 - z[i+1] * z[i+1]) @@ -216,13 +216,13 @@ if 0: for i, node in enumerate(rnn.minimizer.step_cost.maker.fgraph.toposort()): - print i, node + print(i, node) niter=1500 if theano.config.mode=='DEBUG_MODE': niter=30 - for i in xrange(niter): + for i in range(niter): rnn.minimizer.step_cost(x, y) if theano.config.mode=='DEBUG_MODE': assert rnn.minimizer.step_cost(x,y) < -.9 #it starts around -.28 @@ -261,14 +261,14 @@ topo1=rnn1.minimizer.step_cost.maker.fgraph.toposort() topo2=rnn2.minimizer.step_cost.maker.fgraph.toposort() for i in range(len(topo1)): - print '1',i, topo1[i] - print '2',i, topo2[i] + print('1',i, topo1[i]) + print('2',i, topo2[i]) if 0: topo1=rnn1.minimizer.step.maker.fgraph.toposort() topo2=rnn2.minimizer.step.maker.fgraph.toposort() for i in range(len(topo1)): - print '1',i, topo1[i] - print '2',i, topo2[i] + print('1',i, topo1[i]) + print('2',i, topo2[i]) import theano.printing #print len(rnn1.minimizer.step.maker.inputs) @@ -289,7 +289,7 @@ niter=3 - for i in xrange(niter): + for i in range(niter): #print rnn1.minimizer.step_cost(x, y) #print rnn2.minimizer.step_cost(x, y) --- ./theano/compile/tests/test_function_module.py (original) +++ ./theano/compile/tests/test_function_module.py (refactored) @@ -1,5 +1,5 @@ import copy -import cPickle +import pickle import numpy import unittest @@ -23,7 +23,7 @@ def checkfor(testcase, fn, E): try: fn() - except Exception, e: + except Exception as e: if isinstance(e, E): # we got the exception we wanted return @@ -405,7 +405,7 @@ try: g = copy.deepcopy(f) - except NotImplementedError, e: + except NotImplementedError as e: if e[0].startswith('DebugMode is not picklable'): return else: @@ -452,7 +452,7 @@ hc = copy.deepcopy(h, memo = memo) memo.update({id(h): hc}) fc = copy.deepcopy(f, memo = memo) - except NotImplementedError, e: + except NotImplementedError as e: if e[0].startswith('DebugMode is not picklable'): return else: @@ -471,9 +471,9 @@ try: # Note that here we also test protocol 0 on purpose, since it # should work (even though one should not use it). 
- g = cPickle.loads(cPickle.dumps(f, protocol=0)) - g = cPickle.loads(cPickle.dumps(f, protocol=-1)) - except NotImplementedError, e: + g = pickle.loads(pickle.dumps(f, protocol=0)) + g = pickle.loads(pickle.dumps(f, protocol=-1)) + except NotImplementedError as e: if e[0].startswith('DebugMode is not picklable'): return else: @@ -510,14 +510,14 @@ old_default_link = config.linker RefactoringTool: Refactored ./theano/compile/tests/test_debugmode.py RefactoringTool: No changes to ./theano/compile/tests/test_builders.py RefactoringTool: Refactored ./theano/compile/sharedvalue.py RefactoringTool: Refactored ./theano/compile/profiling.py try: try: - str_f = cPickle.dumps(f, protocol=-1) + str_f = pickle.dumps(f, protocol=-1) config.mode = 'Mode' config.linker = 'py' config.optimizer = 'None' - g = cPickle.loads(str_f) + g = pickle.loads(str_f) #print g.maker.mode #print compile.mode.default_mode - except NotImplementedError, e: + except NotImplementedError as e: if e[0].startswith('DebugMode is not pickl'): g = 'ok' finally: @@ -570,9 +570,9 @@ # try to pickle the entire things try: - saved_format = cPickle.dumps(list_of_things, protocol=-1) - new_list_of_things = cPickle.loads(saved_format) - except NotImplementedError, e: + saved_format = pickle.dumps(list_of_things, protocol=-1) + new_list_of_things = pickle.loads(saved_format) + except NotImplementedError as e: if e[0].startswith('DebugMode is not picklable'): return else: @@ -634,18 +634,18 @@ from theano.compat import BytesIO fp = BytesIO() - p = cPickle.Pickler(fp, 2) + p = pickle.Pickler(fp, 2) p.persistent_id = pers_save try: p.dump(f) - except NotImplementedError, e: + except NotImplementedError as e: if exc_message(e).startswith('DebugMode is not picklable'): return else: raise fp2 = BytesIO(fp.getvalue()) fp.close() - p = cPickle.Unpickler(fp2) + p = pickle.Unpickler(fp2) p.persistent_load = pers_load f2 = p.load() fp2.close() @@ -657,7 +657,7 @@ try: blah2 = copy.deepcopy(blah) - except NotImplementedError, e: + except NotImplementedError as e: if e[0].startswith('DebugMode is not picklable'): return else: @@ -720,5 +720,5 @@ t = T_picklefunction() def fu(b): assert b - t.failUnless = fu + t.assertTrue = fu t.test_deepcopy_shared_container() --- ./theano/compile/tests/test_debugmode.py (original) +++ ./theano/compile/tests/test_debugmode.py (refactored) @@ -215,7 +215,7 @@ try: f_inconsistent([1.0, 2.0, 3.0], [2, 3, 4]) - except debugmode.BadThunkOutput, e: + except debugmode.BadThunkOutput as e: #print repr(e) assert e.r.owner.op is inconsistent return # TEST PASS @@ -241,7 +241,7 @@ try: f([1.0, 2.0, 3.0], [2, 3, 4],) - except debugmode.BadOptimization, e: + except debugmode.BadOptimization as e: assert str(e.reason) == 'insert_broken_add' return # TEST PASS @@ -500,7 +500,7 @@ try: f([1, 2, 3, 4], [5, 6, 7, 8]) assert False # DebugMode should have caught the error - except debugmode.BadViewMap, e: + except debugmode.BadViewMap as e: #print e pass --- ./theano/compile/sharedvalue.py (original) +++ ./theano/compile/sharedvalue.py (refactored) @@ -196,7 +196,7 @@ # This was done on purpose, the rationale being that if kwargs # were supplied, the user didn't want them to be ignored. - except MemoryError, e: + except MemoryError as e: e.args = e.args + ('you might consider' ' using \'theano.shared(..., borrow=True)\'',) raise --- ./theano/compile/profiling.py (original) +++ ./theano/compile/profiling.py (refactored) @@ -67,7 +67,7 @@ to_sum.append(ps) else: #TODO print the name if there is one!
- print 'Skipping empty Profile' + print('Skipping empty Profile') if len(to_sum) > 1: # Make a global profile cum = copy.copy(to_sum[0]) @@ -82,7 +82,7 @@ for attr in ["apply_time", "apply_callcount", "apply_cimpl", "variable_shape", "variable_strides"]: cum_attr = getattr(cum, attr) - for key, val in getattr(ps, attr).iteritems(): + for key, val in getattr(ps, attr).items(): assert key not in cum_attr cum_attr[key] = val @@ -213,7 +213,7 @@ """dict op -> total time on thunks""" # timing is stored by node, we compute timing by class on demand rval = {} - for node, t in self.apply_time.items(): + for node, t in list(self.apply_time.items()): typ = type(node.op) rval.setdefault(typ, 0) rval[typ] += t @@ -223,7 +223,7 @@ """dict op -> total number of thunk calls""" # timing is stored by node, we compute timing by class on demand rval = {} - for node, count in self.apply_callcount.items(): + for node, count in list(self.apply_callcount.items()): typ = type(node.op) rval.setdefault(typ, 0) rval[typ] += count @@ -233,7 +233,7 @@ """dict op -> total number of nodes""" # timing is stored by node, we compute timing by class on demand rval = {} - for node, count in self.apply_callcount.items(): + for node, count in list(self.apply_callcount.items()): typ = type(node.op) rval.setdefault(typ, 0) rval[typ] += 1 @@ -258,7 +258,7 @@ """dict op -> total time on thunks""" # timing is stored by node, we compute timing by Op on demand rval = {} - for node, t in self.apply_time.items(): + for node, t in list(self.apply_time.items()): rval.setdefault(node.op, 0) rval[node.op] += t return rval @@ -267,7 +267,7 @@ """dict op -> total number of thunk calls""" # timing is stored by node, we compute timing by Op on demand rval = {} - for node, count in self.apply_callcount.items(): + for node, count in list(self.apply_callcount.items()): rval.setdefault(node.op, 0) rval[node.op] += count return rval @@ -276,7 +276,7 @@ """dict op -> total number of nodes""" # timing is stored by node, we compute timing by Op on demand rval = {} - for node, count in self.apply_callcount.items(): + for node, count in list(self.apply_callcount.items()): rval.setdefault(node.op, 0) rval[node.op] += 1 return rval @@ -298,8 +298,8 @@ else: local_time = 0 if local_time == 0: - print >> file, ('ProfileMode.summary_class: total time 0' - ' (did you forget to enable counters?)') + print(('ProfileMode.summary_class: total time 0' + ' (did you forget to enable counters?)'), file=file) return class_time = self.class_time() class_call = self.class_callcount() @@ -313,12 +313,12 @@ class_impl.get(clas, ' '), class_call.get(clas, 0), class_apply.get(clas, 0)) - for clas, t in class_time.items()] + for clas, t in list(class_time.items())] otimes.sort() otimes.reverse() tot = 0 - print >> file, 'Class' - print >> file, '---' + print('Class', file=file) + print('---', file=file) #print >> file, '<% time> ,' #print >>file, '