summaryrefslogtreecommitdiffstats
path: root/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg
diff options
context:
space:
mode:
authorPavel Aharoni <pa0916@att.com>2017-03-29 13:35:45 +0300
committerPavel Aharoni <pa0916@att.com>2017-03-29 13:35:45 +0300
commite2cc2530fc6d54ebc975c01a4ff887ce12f0a736 (patch)
tree38385867295c8a09fb0d7f8eaf5fa78179e5b13a /jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg
parentbccebaa9888906f8ff78172f62ec592956066d82 (diff)
[SDC-6] sdc-distribution-client 1707 rebasing
Change-Id: I322a05fd79beb6ba4fee4d32afffecf531b86e98 Signed-off-by: Pavel Aharoni <pa0916@att.com>
Diffstat (limited to 'jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg')
-rw-r--r--jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/PKG-INFO48
-rw-r--r--jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/SOURCES.txt12
-rw-r--r--jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/dependency_links.txt1
-rw-r--r--jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/not-zip-safe1
-rw-r--r--jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/top_level.txt1
-rw-r--r--jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/__init__.py11
-rw-r--r--jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/py2.py223
-rw-r--r--jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/py3.py95
-rw-r--r--jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/test.py919
9 files changed, 1311 insertions, 0 deletions
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/PKG-INFO b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/PKG-INFO
new file mode 100644
index 0000000..2fc3a20
--- /dev/null
+++ b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/PKG-INFO
@@ -0,0 +1,48 @@
+Metadata-Version: 1.1
+Name: unicodecsv
+Version: 0.14.1
+Summary: Python2's stdlib csv module is nice, but it doesn't support unicode. This module is a drop-in replacement which *does*.
+Home-page: https://github.com/jdunck/python-unicodecsv
+Author: Jeremy Dunck
+Author-email: jdunck@gmail.com
+License: BSD License
+Description: unicodecsv
+ ==========
+
+ The unicodecsv is a drop-in replacement for Python 2.7's csv module which supports unicode strings without a hassle. Supported versions are python 2.7, 3.3, 3.4, 3.5, and pypy 2.4.0.
+
+ More fully
+ ----------
+
+ Python 2's csv module doesn't easily deal with unicode strings, leading to the dreaded "'ascii' codec can't encode characters in position ..." exception.
+
+ You can work around it by encoding everything just before calling write (or just after read), but why not add support to the serializer?
+
+ .. code-block:: pycon
+
+ >>> import unicodecsv as csv
+ >>> from io import BytesIO
+ >>> f = BytesIO()
+ >>> w = csv.writer(f, encoding='utf-8')
+ >>> _ = w.writerow((u'é', u'ñ'))
+ >>> _ = f.seek(0)
+ >>> r = csv.reader(f, encoding='utf-8')
+ >>> next(r) == [u'é', u'ñ']
+ True
+
+ Note that unicodecsv expects a bytestream, not unicode -- so there's no need to use `codecs.open` or similar wrappers. Plain `open(..., 'rb')` will do.
+
+ (Version 0.14.0 dropped support for python 2.6, but 0.14.1 added it back. See c0b7655248c4249 for the mistaken, breaking change.)
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Programming Language :: Python :: Implementation :: CPython
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/SOURCES.txt b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/SOURCES.txt
new file mode 100644
index 0000000..bb7ea0b
--- /dev/null
+++ b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/SOURCES.txt
@@ -0,0 +1,12 @@
+MANIFEST.in
+README.rst
+setup.cfg
+setup.py
+unicodecsv/__init__.py
+unicodecsv/py2.py
+unicodecsv/py3.py
+unicodecsv/test.py
+unicodecsv.egg-info/PKG-INFO
+unicodecsv.egg-info/SOURCES.txt
+unicodecsv.egg-info/dependency_links.txt
+unicodecsv.egg-info/top_level.txt \ No newline at end of file
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/dependency_links.txt b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/not-zip-safe b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/not-zip-safe
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/top_level.txt b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/top_level.txt
new file mode 100644
index 0000000..fee1540
--- /dev/null
+++ b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/EGG-INFO/top_level.txt
@@ -0,0 +1 @@
+unicodecsv
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/__init__.py b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/__init__.py
new file mode 100644
index 0000000..270bdee
--- /dev/null
+++ b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/__init__.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+# http://semver.org/
+import sys
+
+if sys.version_info >= (3, 0):
+ from unicodecsv.py3 import *
+else:
+ from unicodecsv.py2 import *
+
+VERSION = (0, 14, 1)
+__version__ = ".".join(map(str, VERSION))
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/py2.py b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/py2.py
new file mode 100644
index 0000000..f436397
--- /dev/null
+++ b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/py2.py
@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+import csv
+import numbers
+
+from itertools import izip
+
+pass_throughs = [
+ 'register_dialect',
+ 'unregister_dialect',
+ 'get_dialect',
+ 'list_dialects',
+ 'field_size_limit',
+ 'Dialect',
+ 'excel',
+ 'excel_tab',
+ 'Sniffer',
+ 'QUOTE_ALL',
+ 'QUOTE_MINIMAL',
+ 'QUOTE_NONNUMERIC',
+ 'QUOTE_NONE',
+ 'Error'
+]
+__all__ = [
+ 'reader',
+ 'writer',
+ 'DictReader',
+ 'DictWriter',
+] + pass_throughs
+
+for prop in pass_throughs:
+ globals()[prop] = getattr(csv, prop)
+
+
+def _stringify(s, encoding, errors):
+ if s is None:
+ return ''
+ if isinstance(s, unicode):
+ return s.encode(encoding, errors)
+ elif isinstance(s, numbers.Number):
+ pass # let csv.QUOTE_NONNUMERIC do its thing.
+ elif not isinstance(s, str):
+ s = str(s)
+ return s
+
+
+def _stringify_list(l, encoding, errors='strict'):
+ try:
+ return [_stringify(s, encoding, errors) for s in iter(l)]
+ except TypeError as e:
+ raise csv.Error(str(e))
+
+
+def _unicodify(s, encoding):
+ if s is None:
+ return None
+ if isinstance(s, (unicode, int, float)):
+ return s
+ elif isinstance(s, str):
+ return s.decode(encoding)
+ return s
+
+
+class UnicodeWriter(object):
+ """
+ >>> import unicodecsv
+ >>> from cStringIO import StringIO
+ >>> f = StringIO()
+ >>> w = unicodecsv.writer(f, encoding='utf-8')
+ >>> w.writerow((u'é', u'ñ'))
+ >>> f.seek(0)
+ >>> r = unicodecsv.reader(f, encoding='utf-8')
+ >>> row = r.next()
+ >>> row[0] == u'é'
+ True
+ >>> row[1] == u'ñ'
+ True
+ """
+ def __init__(self, f, dialect=csv.excel, encoding='utf-8', errors='strict',
+ *args, **kwds):
+ self.encoding = encoding
+ self.writer = csv.writer(f, dialect, *args, **kwds)
+ self.encoding_errors = errors
+
+ def writerow(self, row):
+ return self.writer.writerow(
+ _stringify_list(row, self.encoding, self.encoding_errors))
+
+ def writerows(self, rows):
+ for row in rows:
+ self.writerow(row)
+
+ @property
+ def dialect(self):
+ return self.writer.dialect
+writer = UnicodeWriter
+
+
+class UnicodeReader(object):
+ def __init__(self, f, dialect=None, encoding='utf-8', errors='strict',
+ **kwds):
+
+ format_params = ['delimiter', 'doublequote', 'escapechar',
+ 'lineterminator', 'quotechar', 'quoting',
+ 'skipinitialspace']
+
+ if dialect is None:
+ if not any([kwd_name in format_params
+ for kwd_name in kwds.keys()]):
+ dialect = csv.excel
+ self.reader = csv.reader(f, dialect, **kwds)
+ self.encoding = encoding
+ self.encoding_errors = errors
+ self._parse_numerics = bool(
+ self.dialect.quoting & csv.QUOTE_NONNUMERIC)
+
+ def next(self):
+ row = self.reader.next()
+ encoding = self.encoding
+ encoding_errors = self.encoding_errors
+ unicode_ = unicode
+ if self._parse_numerics:
+ float_ = float
+ return [(value if isinstance(value, float_) else
+ unicode_(value, encoding, encoding_errors))
+ for value in row]
+ else:
+ return [unicode_(value, encoding, encoding_errors)
+ for value in row]
+
+ def __iter__(self):
+ return self
+
+ @property
+ def dialect(self):
+ return self.reader.dialect
+
+ @property
+ def line_num(self):
+ return self.reader.line_num
+reader = UnicodeReader
+
+
+class DictWriter(csv.DictWriter):
+ """
+ >>> from cStringIO import StringIO
+ >>> f = StringIO()
+ >>> w = DictWriter(f, ['a', u'ñ', 'b'], restval=u'î')
+ >>> w.writerow({'a':'1', u'ñ':'2'})
+ >>> w.writerow({'a':'1', u'ñ':'2', 'b':u'ø'})
+ >>> w.writerow({'a':u'é', u'ñ':'2'})
+ >>> f.seek(0)
+ >>> r = DictReader(f, fieldnames=['a', u'ñ'], restkey='r')
+ >>> r.next() == {'a': u'1', u'ñ':'2', 'r': [u'î']}
+ True
+ >>> r.next() == {'a': u'1', u'ñ':'2', 'r': [u'\xc3\xb8']}
+ True
+ >>> r.next() == {'a': u'\xc3\xa9', u'ñ':'2', 'r': [u'\xc3\xae']}
+ True
+ """
+ def __init__(self, csvfile, fieldnames, restval='',
+ extrasaction='raise', dialect='excel', encoding='utf-8',
+ errors='strict', *args, **kwds):
+ self.encoding = encoding
+ csv.DictWriter.__init__(self, csvfile, fieldnames, restval,
+ extrasaction, dialect, *args, **kwds)
+ self.writer = UnicodeWriter(csvfile, dialect, encoding=encoding,
+ errors=errors, *args, **kwds)
+ self.encoding_errors = errors
+
+ def writeheader(self):
+ header = dict(zip(self.fieldnames, self.fieldnames))
+ self.writerow(header)
+
+
+class DictReader(csv.DictReader):
+ """
+ >>> from cStringIO import StringIO
+ >>> f = StringIO()
+ >>> w = DictWriter(f, fieldnames=['name', 'place'])
+ >>> w.writerow({'name': 'Cary Grant', 'place': 'hollywood'})
+ >>> w.writerow({'name': 'Nathan Brillstone', 'place': u'øLand'})
+ >>> w.writerow({'name': u'Will ø. Unicoder', 'place': u'éSpandland'})
+ >>> f.seek(0)
+ >>> r = DictReader(f, fieldnames=['name', 'place'])
+ >>> print r.next() == {'name': 'Cary Grant', 'place': 'hollywood'}
+ True
+ >>> print r.next() == {'name': 'Nathan Brillstone', 'place': u'øLand'}
+ True
+ >>> print r.next() == {'name': u'Will ø. Unicoder', 'place': u'éSpandland'}
+ True
+ """
+ def __init__(self, csvfile, fieldnames=None, restkey=None, restval=None,
+ dialect='excel', encoding='utf-8', errors='strict', *args,
+ **kwds):
+ if fieldnames is not None:
+ fieldnames = _stringify_list(fieldnames, encoding)
+ csv.DictReader.__init__(self, csvfile, fieldnames, restkey, restval,
+ dialect, *args, **kwds)
+ self.reader = UnicodeReader(csvfile, dialect, encoding=encoding,
+ errors=errors, *args, **kwds)
+ if fieldnames is None and not hasattr(csv.DictReader, 'fieldnames'):
+ # Python 2.5 fieldnames workaround.
+ # See http://bugs.python.org/issue3436
+ reader = UnicodeReader(csvfile, dialect, encoding=encoding,
+ *args, **kwds)
+ self.fieldnames = _stringify_list(reader.next(), reader.encoding)
+
+ if self.fieldnames is not None:
+ self.unicode_fieldnames = [_unicodify(f, encoding) for f in
+ self.fieldnames]
+ else:
+ self.unicode_fieldnames = []
+
+ self.unicode_restkey = _unicodify(restkey, encoding)
+
+ def next(self):
+ row = csv.DictReader.next(self)
+ result = dict((uni_key, row[str_key]) for (str_key, uni_key) in
+ izip(self.fieldnames, self.unicode_fieldnames))
+ rest = row.get(self.restkey)
+ if rest:
+ result[self.unicode_restkey] = rest
+ return result
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/py3.py b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/py3.py
new file mode 100644
index 0000000..13a8bf7
--- /dev/null
+++ b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/py3.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+import csv
+from csv import *
+
+
+class _UnicodeWriteWrapper(object):
+ """Simple write() wrapper that converts unicode to bytes."""
+
+ def __init__(self, binary, encoding, errors):
+ self.binary = binary
+ self.encoding = encoding
+ self.errors = errors
+
+ def write(self, string):
+ return self.binary.write(string.encode(self.encoding, self.errors))
+
+
+class UnicodeWriter(object):
+ def __init__(self, f, dialect=csv.excel, encoding='utf-8', errors='strict',
+ *args, **kwds):
+ if f is None:
+ raise TypeError
+
+ f = _UnicodeWriteWrapper(f, encoding=encoding, errors=errors)
+ self.writer = csv.writer(f, dialect, *args, **kwds)
+
+ def writerow(self, row):
+ return self.writer.writerow(row)
+
+ def writerows(self, rows):
+ return self.writer.writerows(rows)
+
+ @property
+ def dialect(self):
+ return self.writer.dialect
+
+
+class UnicodeReader(object):
+ def __init__(self, f, dialect=None, encoding='utf-8', errors='strict',
+ **kwds):
+
+ format_params = ['delimiter', 'doublequote', 'escapechar',
+ 'lineterminator', 'quotechar', 'quoting',
+ 'skipinitialspace']
+
+ if dialect is None:
+ if not any([kwd_name in format_params
+ for kwd_name in kwds.keys()]):
+ dialect = csv.excel
+
+ f = (bs.decode(encoding, errors=errors) for bs in f)
+ self.reader = csv.reader(f, dialect, **kwds)
+
+ def __next__(self):
+ return self.reader.__next__()
+
+ def __iter__(self):
+ return self
+
+ @property
+ def dialect(self):
+ return self.reader.dialect
+
+ @property
+ def line_num(self):
+ return self.reader.line_num
+
+
+writer = UnicodeWriter
+reader = UnicodeReader
+
+
+class DictWriter(csv.DictWriter):
+ def __init__(self, csvfile, fieldnames, restval='',
+ extrasaction='raise', dialect='excel', encoding='utf-8',
+ errors='strict', *args, **kwds):
+ super().__init__(csvfile, fieldnames, restval,
+ extrasaction, dialect, *args, **kwds)
+ self.writer = UnicodeWriter(csvfile, dialect, encoding=encoding,
+ errors=errors, *args, **kwds)
+ self.encoding_errors = errors
+
+ def writeheader(self):
+ header = dict(zip(self.fieldnames, self.fieldnames))
+ self.writerow(header)
+
+
+class DictReader(csv.DictReader):
+ def __init__(self, csvfile, fieldnames=None, restkey=None, restval=None,
+ dialect='excel', encoding='utf-8', errors='strict', *args,
+ **kwds):
+ csv.DictReader.__init__(self, csvfile, fieldnames, restkey, restval,
+ dialect, *args, **kwds)
+ self.reader = UnicodeReader(csvfile, dialect, encoding=encoding,
+ errors=errors, *args, **kwds)
diff --git a/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/test.py b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/test.py
new file mode 100644
index 0000000..f3d0377
--- /dev/null
+++ b/jython-tosca-parser/src/main/resources/Lib/site-packages/unicodecsv-0.14.1-py2.7.egg/unicodecsv/test.py
@@ -0,0 +1,919 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2001,2002 Python Software Foundation
+# csv package unit tests
+
+import array
+import decimal
+import os
+import string
+import sys
+import tempfile
+import unittest2 as unittest
+from codecs import EncodedFile
+from io import BytesIO
+
+import unicodecsv as csv
+
+try:
+ # Python 2
+ chr = unichr
+except:
+ pass
+
+
+# pypy and cpython differ under which exception is raised under some
+# circumstances e.g. whether a module is written in C or not.
+py_compat_exc = (TypeError, AttributeError)
+
+
+class Test_Csv(unittest.TestCase):
+ """
+ Test the underlying C csv parser in ways that are not appropriate
+ from the high level interface. Further tests of this nature are done
+ in TestDialectRegistry.
+ """
+ def _test_arg_valid(self, ctor, arg):
+ self.assertRaises(py_compat_exc, ctor)
+ self.assertRaises(py_compat_exc, ctor, None)
+ self.assertRaises(py_compat_exc, ctor, arg, bad_attr=0)
+ self.assertRaises(py_compat_exc, ctor, arg, delimiter=0)
+ self.assertRaises(py_compat_exc, ctor, arg, delimiter='XX')
+ self.assertRaises(csv.Error, ctor, arg, 'foo')
+ self.assertRaises(py_compat_exc, ctor, arg, delimiter=None)
+ self.assertRaises(py_compat_exc, ctor, arg, delimiter=1)
+ self.assertRaises(py_compat_exc, ctor, arg, quotechar=1)
+ self.assertRaises(py_compat_exc, ctor, arg, lineterminator=None)
+ self.assertRaises(py_compat_exc, ctor, arg, lineterminator=1)
+ self.assertRaises(py_compat_exc, ctor, arg, quoting=None)
+ self.assertRaises(py_compat_exc, ctor, arg,
+ quoting=csv.QUOTE_ALL, quotechar='')
+ self.assertRaises(py_compat_exc, ctor, arg,
+ quoting=csv.QUOTE_ALL, quotechar=None)
+
+ def test_reader_arg_valid(self):
+ self._test_arg_valid(csv.reader, [])
+
+ def test_writer_arg_valid(self):
+ self._test_arg_valid(csv.writer, BytesIO())
+
+ def _test_default_attrs(self, ctor, *args):
+ obj = ctor(*args)
+ # Check defaults
+ self.assertEqual(obj.dialect.delimiter, ',')
+ self.assertEqual(obj.dialect.doublequote, True)
+ self.assertEqual(obj.dialect.escapechar, None)
+ self.assertEqual(obj.dialect.lineterminator, "\r\n")
+ self.assertEqual(obj.dialect.quotechar, '"')
+ self.assertEqual(obj.dialect.quoting, csv.QUOTE_MINIMAL)
+ self.assertEqual(obj.dialect.skipinitialspace, False)
+ self.assertEqual(obj.dialect.strict, False)
+ # Try deleting or changing attributes (they are read-only)
+ self.assertRaises(py_compat_exc, delattr,
+ obj.dialect, 'delimiter')
+ self.assertRaises(py_compat_exc, setattr,
+ obj.dialect, 'delimiter', ':')
+ self.assertRaises(py_compat_exc, delattr,
+ obj.dialect, 'quoting')
+ self.assertRaises(py_compat_exc, setattr,
+ obj.dialect, 'quoting', None)
+
+ def test_reader_attrs(self):
+ self._test_default_attrs(csv.reader, [])
+
+ def test_writer_attrs(self):
+ self._test_default_attrs(csv.writer, BytesIO())
+
+ def _test_kw_attrs(self, ctor, *args):
+ # Now try with alternate options
+ kwargs = dict(delimiter=':', doublequote=False, escapechar='\\',
+ lineterminator='\r', quotechar='*',
+ quoting=csv.QUOTE_NONE, skipinitialspace=True,
+ strict=True)
+ obj = ctor(*args, **kwargs)
+ self.assertEqual(obj.dialect.delimiter, ':')
+ self.assertEqual(obj.dialect.doublequote, False)
+ self.assertEqual(obj.dialect.escapechar, '\\')
+ self.assertEqual(obj.dialect.lineterminator, "\r")
+ self.assertEqual(obj.dialect.quotechar, '*')
+ self.assertEqual(obj.dialect.quoting, csv.QUOTE_NONE)
+ self.assertEqual(obj.dialect.skipinitialspace, True)
+ self.assertEqual(obj.dialect.strict, True)
+
+ def test_reader_kw_attrs(self):
+ self._test_kw_attrs(csv.reader, [])
+
+ def test_writer_kw_attrs(self):
+ self._test_kw_attrs(csv.writer, BytesIO())
+
+ def _test_dialect_attrs(self, ctor, *args):
+ # Now try with dialect-derived options
+ class dialect:
+ delimiter = '-'
+ doublequote = False
+ escapechar = '^'
+ lineterminator = '$'
+ quotechar = '#'
+ quoting = csv.QUOTE_ALL
+ skipinitialspace = True
+ strict = False
+ args = args + (dialect,)
+ obj = ctor(*args)
+ self.assertEqual(obj.dialect.delimiter, '-')
+ self.assertEqual(obj.dialect.doublequote, False)
+ self.assertEqual(obj.dialect.escapechar, '^')
+ self.assertEqual(obj.dialect.lineterminator, "$")
+ self.assertEqual(obj.dialect.quotechar, '#')
+ self.assertEqual(obj.dialect.quoting, csv.QUOTE_ALL)
+ self.assertEqual(obj.dialect.skipinitialspace, True)
+ self.assertEqual(obj.dialect.strict, False)
+
+ def test_reader_dialect_attrs(self):
+ self._test_dialect_attrs(csv.reader, [])
+
+ def test_writer_dialect_attrs(self):
+ self._test_dialect_attrs(csv.writer, BytesIO())
+
+ def _write_test(self, fields, expect, **kwargs):
+ fd, name = tempfile.mkstemp()
+ fileobj = os.fdopen(fd, "w+b")
+ try:
+ writer = csv.writer(fileobj, **kwargs)
+ writer.writerow(fields)
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(),
+ expect + writer.dialect.lineterminator.encode('utf-8'))
+ finally:
+ fileobj.close()
+ os.unlink(name)
+
+ def test_write_arg_valid(self):
+ import sys
+ pypy3 = hasattr(sys, 'pypy_version_info') and sys.version_info.major == 3
+
+ self.assertRaises(TypeError if pypy3 else csv.Error, self._write_test, None, '')
+ self._write_test((), b'')
+ self._write_test([None], b'""')
+ self.assertRaises(csv.Error, self._write_test,
+ [None], None, quoting=csv.QUOTE_NONE)
+
+ # Check that exceptions are passed up the chain
+ class BadList:
+ def __len__(self):
+ return 10
+
+ def __getitem__(self, i):
+ if i > 2:
+ raise IOError
+
+ self.assertRaises(IOError, self._write_test, BadList(), '')
+
+ class BadItem:
+ def __str__(self):
+ raise IOError
+
+ self.assertRaises(IOError, self._write_test, [BadItem()], '')
+
+ def test_write_bigfield(self):
+ # This exercises the buffer realloc functionality
+ bigstring = 'X' * 50000
+ self._write_test([bigstring, bigstring],
+ b','.join([bigstring.encode('utf-8')] * 2))
+
+ def test_write_quoting(self):
+ self._write_test(['a', 1, 'p,q'], b'a,1,"p,q"')
+ self.assertRaises(csv.Error,
+ self._write_test,
+ ['a', 1, 'p,q'], b'a,1,p,q',
+ quoting=csv.QUOTE_NONE)
+ self._write_test(['a', 1, 'p,q'], b'a,1,"p,q"',
+ quoting=csv.QUOTE_MINIMAL)
+ self._write_test(['a', 1, 'p,q'], b'"a",1,"p,q"',
+ quoting=csv.QUOTE_NONNUMERIC)
+ self._write_test(['a', 1, 'p,q'], b'"a","1","p,q"',
+ quoting=csv.QUOTE_ALL)
+ self._write_test(['a\nb', 1], b'"a\nb","1"',
+ quoting=csv.QUOTE_ALL)
+
+ def test_write_decimal(self):
+ self._write_test(['a', decimal.Decimal("1.1"), 'p,q'], b'"a",1.1,"p,q"',
+ quoting=csv.QUOTE_NONNUMERIC)
+
+ def test_write_escape(self):
+ self._write_test(['a', 1, 'p,q'], b'a,1,"p,q"',
+ escapechar='\\')
+ self.assertRaises(csv.Error,
+ self._write_test,
+ ['a', 1, 'p,"q"'], b'a,1,"p,\\"q\\""',
+ escapechar=None, doublequote=False)
+ self._write_test(['a', 1, 'p,"q"'], b'a,1,"p,\\"q\\""',
+ escapechar='\\', doublequote=False)
+ self._write_test(['"'], b'""""',
+ escapechar='\\', quoting=csv.QUOTE_MINIMAL)
+ self._write_test(['"'], b'\\"',
+ escapechar='\\', quoting=csv.QUOTE_MINIMAL,
+ doublequote=False)
+ self._write_test(['"'], b'\\"',
+ escapechar='\\', quoting=csv.QUOTE_NONE)
+ self._write_test(['a', 1, 'p,q'], b'a,1,p\\,q',
+ escapechar='\\', quoting=csv.QUOTE_NONE)
+
+ def test_writerows(self):
+ class BrokenFile:
+ def write(self, buf):
+ raise IOError
+
+ writer = csv.writer(BrokenFile())
+ self.assertRaises(IOError, writer.writerows, [['a']])
+
+ fd, name = tempfile.mkstemp()
+ fileobj = os.fdopen(fd, "w+b")
+ try:
+ writer = csv.writer(fileobj)
+ self.assertRaises(TypeError, writer.writerows, None)
+ writer.writerows([['a', 'b'], ['c', 'd']])
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), b"a,b\r\nc,d\r\n")
+ finally:
+ fileobj.close()
+ os.unlink(name)
+
+ def _read_test(self, input, expect, **kwargs):
+ reader = csv.reader(input, **kwargs)
+ result = list(reader)
+ self.assertEqual(result, expect)
+
+ def test_read_oddinputs(self):
+ self._read_test([], [])
+ self._read_test([b''], [[]])
+ self.assertRaises(csv.Error, self._read_test,
+ [b'"ab"c'], None, strict=1)
+ # cannot handle null bytes for the moment
+ self.assertRaises(csv.Error, self._read_test,
+ [b'ab\0c'], None, strict=1)
+ self._read_test([b'"ab"c'], [['abc']], doublequote=0)
+
+ def test_read_eol(self):
+ self._read_test([b'a,b'], [['a', 'b']])
+ self._read_test([b'a,b\n'], [['a', 'b']])
+ self._read_test([b'a,b\r\n'], [['a', 'b']])
+ self._read_test([b'a,b\r'], [['a', 'b']])
+ self.assertRaises(csv.Error, self._read_test, [b'a,b\rc,d'], [])
+ self.assertRaises(csv.Error, self._read_test, [b'a,b\nc,d'], [])
+ self.assertRaises(csv.Error, self._read_test, [b'a,b\r\nc,d'], [])
+
+ def test_read_escape(self):
+ self._read_test([b'a,\\b,c'], [['a', 'b', 'c']], escapechar='\\')
+ self._read_test([b'a,b\\,c'], [['a', 'b,c']], escapechar='\\')
+ self._read_test([b'a,"b\\,c"'], [['a', 'b,c']], escapechar='\\')
+ self._read_test([b'a,"b,\\c"'], [['a', 'b,c']], escapechar='\\')
+ self._read_test([b'a,"b,c\\""'], [['a', 'b,c"']], escapechar='\\')
+ self._read_test([b'a,"b,c"\\'], [['a', 'b,c\\']], escapechar='\\')
+
+ def test_read_quoting(self):
+ self._read_test([b'1,",3,",5'], [['1', ',3,', '5']])
+ self._read_test([b'1,",3,",5'], [['1', '"', '3', '"', '5']],
+ quotechar=None, escapechar='\\')
+ self._read_test([b'1,",3,",5'], [['1', '"', '3', '"', '5']],
+ quoting=csv.QUOTE_NONE, escapechar='\\')
+ # will this fail where locale uses comma for decimals?
+ self._read_test([b',3,"5",7.3, 9'], [['', 3, '5', 7.3, 9]],
+ quoting=csv.QUOTE_NONNUMERIC)
+ self._read_test([b'"a\nb", 7'], [['a\nb', ' 7']])
+ self.assertRaises(ValueError, self._read_test,
+ [b'abc,3'], [[]],
+ quoting=csv.QUOTE_NONNUMERIC)
+
+ def test_read_linenum(self):
+ for r in (csv.reader([b'line,1', b'line,2', b'line,3']),
+ csv.DictReader([b'line,1', b'line,2', b'line,3'],
+ fieldnames=['a', 'b', 'c'])):
+ self.assertEqual(r.line_num, 0)
+ next(r)
+ self.assertEqual(r.line_num, 1)
+ next(r)
+ self.assertEqual(r.line_num, 2)
+ next(r)
+ self.assertEqual(r.line_num, 3)
+ self.assertRaises(StopIteration, next, r)
+ self.assertEqual(r.line_num, 3)
+
+ def test_roundtrip_quoteed_newlines(self):
+ fd, name = tempfile.mkstemp()
+ fileobj = os.fdopen(fd, "w+b")
+ try:
+ writer = csv.writer(fileobj)
+ self.assertRaises(TypeError, writer.writerows, None)
+ rows = [['a\nb', 'b'], ['c', 'x\r\nd']]
+ writer.writerows(rows)
+ fileobj.seek(0)
+ for i, row in enumerate(csv.reader(fileobj)):
+ self.assertEqual(row, rows[i])
+ finally:
+ fileobj.close()
+ os.unlink(name)
+
+
+class TestDialectRegistry(unittest.TestCase):
+ def test_registry_badargs(self):
+ self.assertRaises(TypeError, csv.list_dialects, None)
+ self.assertRaises(TypeError, csv.get_dialect)
+ self.assertRaises(csv.Error, csv.get_dialect, None)
+ self.assertRaises(csv.Error, csv.get_dialect, "nonesuch")
+ self.assertRaises(TypeError, csv.unregister_dialect)
+ self.assertRaises(csv.Error, csv.unregister_dialect, None)
+ self.assertRaises(csv.Error, csv.unregister_dialect, "nonesuch")
+ self.assertRaises(TypeError, csv.register_dialect, None)
+ self.assertRaises(TypeError, csv.register_dialect, None, None)
+ self.assertRaises(TypeError, csv.register_dialect, "nonesuch", 0, 0)
+ self.assertRaises(TypeError, csv.register_dialect, "nonesuch",
+ badargument=None)
+ self.assertRaises(TypeError, csv.register_dialect, "nonesuch",
+ quoting=None)
+ self.assertRaises(TypeError, csv.register_dialect, [])
+
+ def test_registry(self):
+ class myexceltsv(csv.excel):
+ delimiter = "\t"
+ name = "myexceltsv"
+ expected_dialects = csv.list_dialects() + [name]
+ expected_dialects.sort()
+ csv.register_dialect(name, myexceltsv)
+ try:
+ self.assertEqual(csv.get_dialect(name).delimiter, '\t')
+ got_dialects = csv.list_dialects()
+ got_dialects.sort()
+ self.assertEqual(expected_dialects, got_dialects)
+ finally:
+ csv.unregister_dialect(name)
+
+ def test_register_kwargs(self):
+ name = 'fedcba'
+ csv.register_dialect(name, delimiter=';')
+ try:
+ self.assertNotEqual(csv.get_dialect(name).delimiter, '\t')
+ self.assertEqual(list(csv.reader([b'X;Y;Z'], name)), [[u'X', u'Y', u'Z']])
+ finally:
+ csv.unregister_dialect(name)
+
+ def test_incomplete_dialect(self):
+ class myexceltsv(csv.Dialect):
+ delimiter = "\t"
+ self.assertRaises(csv.Error, myexceltsv)
+
+ def test_space_dialect(self):
+ class space(csv.excel):
+ delimiter = " "
+ quoting = csv.QUOTE_NONE
+ escapechar = "\\"
+
+ fd, name = tempfile.mkstemp()
+ fileobj = os.fdopen(fd, "w+b")
+ try:
+ fileobj.write(b"abc def\nc1ccccc1 benzene\n")
+ fileobj.seek(0)
+ rdr = csv.reader(fileobj, dialect=space())
+ self.assertEqual(next(rdr), ["abc", "def"])
+ self.assertEqual(next(rdr), ["c1ccccc1", "benzene"])
+ finally:
+ fileobj.close()
+ os.unlink(name)
+
+ def test_dialect_apply(self):
+ class testA(csv.excel):
+ delimiter = "\t"
+
+ class testB(csv.excel):
+ delimiter = ":"
+
+ class testC(csv.excel):
+ delimiter = "|"
+
+ csv.register_dialect('testC', testC)
+ try:
+ fd, name = tempfile.mkstemp()
+ fileobj = os.fdopen(fd, "w+b")
+ try:
+ writer = csv.writer(fileobj)
+ writer.writerow([1, 2, 3])
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), b"1,2,3\r\n")
+ finally:
+ fileobj.close()
+ os.unlink(name)
+
+ fd, name = tempfile.mkstemp()
+ fileobj = os.fdopen(fd, "w+b")
+ try:
+ writer = csv.writer(fileobj, testA)
+ writer.writerow([1, 2, 3])
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), b"1\t2\t3\r\n")
+ finally:
+ fileobj.close()
+ os.unlink(name)
+
+ fd, name = tempfile.mkstemp()
+ fileobj = os.fdopen(fd, "w+b")
+ try:
+ writer = csv.writer(fileobj, dialect=testB())
+ writer.writerow([1, 2, 3])
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), b"1:2:3\r\n")
+ finally:
+ fileobj.close()
+ os.unlink(name)
+
+ fd, name = tempfile.mkstemp()
+ fileobj = os.fdopen(fd, "w+b")
+ try:
+ writer = csv.writer(fileobj, dialect='testC')
+ writer.writerow([1, 2, 3])
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), b"1|2|3\r\n")
+ finally:
+ fileobj.close()
+ os.unlink(name)
+
+ fd, name = tempfile.mkstemp()
+ fileobj = os.fdopen(fd, "w+b")
+ try:
+ writer = csv.writer(fileobj, dialect=testA, delimiter=';')
+ writer.writerow([1, 2, 3])
+ fileobj.seek(0)
+ self.assertEqual(fileobj.read(), b"1;2;3\r\n")
+ finally:
+ fileobj.close()
+ os.unlink(name)
+
+ finally:
+ csv.unregister_dialect('testC')
+
+ def test_bad_dialect(self):
+ # Unknown parameter
+ self.assertRaises(TypeError, csv.reader, [], bad_attr=0)
+ # Bad values
+ self.assertRaises(TypeError, csv.reader, [], delimiter=None)
+ self.assertRaises(TypeError, csv.reader, [], quoting=-1)
+ self.assertRaises(TypeError, csv.reader, [], quoting=100)
+
+
+class TestCsvBase(unittest.TestCase):
+    # Shared round-trip helpers for the dialect test classes below.
+    # Subclasses set a `dialect` class attribute (name or Dialect instance)
+    # that both helpers pass through to csv.reader / csv.writer.
+    def readerAssertEqual(self, input, expected_result):
+        # Write `input` (bytes) to a temp file, parse it back with
+        # self.dialect, and assert the resulting list of rows.
+        fd, name = tempfile.mkstemp()
+        fileobj = os.fdopen(fd, "w+b")
+        try:
+            fileobj.write(input)
+            fileobj.seek(0)
+            reader = csv.reader(fileobj, dialect=self.dialect)
+            fields = list(reader)
+            self.assertEqual(fields, expected_result)
+        finally:
+            # Always close and remove the temp file, even on assertion failure.
+            fileobj.close()
+            os.unlink(name)
+
+    def writerAssertEqual(self, input, expected_result):
+        # Serialize the rows in `input` with self.dialect and assert the
+        # exact bytes produced.
+        fd, name = tempfile.mkstemp()
+        fileobj = os.fdopen(fd, "w+b")
+        try:
+            writer = csv.writer(fileobj, dialect=self.dialect)
+            writer.writerows(input)
+            fileobj.seek(0)
+            self.assertEqual(fileobj.read(), expected_result)
+        finally:
+            fileobj.close()
+            os.unlink(name)
+
+
+class TestDialectExcel(TestCsvBase):
+    # Parsing and writing behaviour of the default 'excel' dialect:
+    # comma delimiter, '"' quotechar with doubled-quote escaping, \r\n
+    # line terminator. Reader tests feed raw bytes; writer tests check
+    # the exact bytes emitted.
+    dialect = 'excel'
+
+    def test_single(self):
+        self.readerAssertEqual(b'abc', [['abc']])
+
+    def test_simple(self):
+        self.readerAssertEqual(b'1,2,3,4,5', [['1', '2', '3', '4', '5']])
+
+    def test_blankline(self):
+        # Empty input yields no rows at all (not a row of one empty field).
+        self.readerAssertEqual(b'', [])
+
+    def test_empty_fields(self):
+        # A lone delimiter means two empty fields.
+        self.readerAssertEqual(b',', [['', '']])
+
+    def test_singlequoted(self):
+        # '""' is one quoted empty field.
+        self.readerAssertEqual(b'""', [['']])
+
+    def test_singlequoted_left_empty(self):
+        self.readerAssertEqual(b'"",', [['', '']])
+
+    def test_singlequoted_right_empty(self):
+        self.readerAssertEqual(b',""', [['', '']])
+
+    def test_single_quoted_quote(self):
+        # '""""' is a quoted field containing one literal '"'.
+        self.readerAssertEqual(b'""""', [['"']])
+
+    def test_quoted_quotes(self):
+        self.readerAssertEqual(b'""""""', [['""']])
+
+    def test_inline_quote(self):
+        # Quotes not at the start of a field are taken literally.
+        self.readerAssertEqual(b'a""b', [['a""b']])
+
+    def test_inline_quotes(self):
+        self.readerAssertEqual(b'a"b"c', [['a"b"c']])
+
+    def test_quotes_and_more(self):
+        # Excel would never write a field containing '"a"b', but when
+        # reading one, it will return 'ab'.
+        self.readerAssertEqual(b'"a"b', [['ab']])
+
+    def test_lone_quote(self):
+        self.readerAssertEqual(b'a"b', [['a"b']])
+
+    def test_quote_and_quote(self):
+        # Excel would never write a field containing '"a" "b"', but when
+        # reading one, it will return 'a "b"'.
+        self.readerAssertEqual(b'"a" "b"', [['a "b"']])
+
+    def test_space_and_quote(self):
+        # Leading whitespace keeps the field unquoted, quotes stay literal.
+        self.readerAssertEqual(b' "a"', [[' "a"']])
+
+    def test_quoted(self):
+        # A quoted field may contain the delimiter.
+        self.readerAssertEqual(b'1,2,3,"I think, therefore I am",5,6',
+                               [['1', '2', '3',
+                                 'I think, therefore I am',
+                                 '5', '6']])
+
+    def test_quoted_quote(self):
+        # Doubled quotes inside a quoted field decode to a single '"'.
+        value = b'1,2,3,"""I see,"" said the blind man","as he picked up his hammer and saw"'
+        self.readerAssertEqual(value,
+                               [['1', '2', '3',
+                                 '"I see," said the blind man',
+                                 'as he picked up his hammer and saw']])
+
+    def test_quoted_nl(self):
+        # Newlines inside quoted fields are part of the field, not row breaks.
+        input = b'''\
+1,2,3,"""I see,""
+said the blind man","as he picked up his
+hammer and saw"
+9,8,7,6'''
+        self.readerAssertEqual(input,
+                               [['1', '2', '3',
+                                 '"I see,"\nsaid the blind man',
+                                 'as he picked up his\nhammer and saw'],
+                                ['9', '8', '7', '6']])
+
+    def test_dubious_quote(self):
+        self.readerAssertEqual(b'12,12,1",', [['12', '12', '1"', '']])
+
+    def test_null(self):
+        # Writing no rows produces no output.
+        self.writerAssertEqual([], b'')
+
+    def test_single_writer(self):
+        self.writerAssertEqual([['abc']], b'abc\r\n')
+
+    def test_simple_writer(self):
+        # Non-string values are stringified by the writer.
+        self.writerAssertEqual([[1, 2, 'abc', 3, 4]],
+                               b'1,2,abc,3,4\r\n')
+
+    def test_quotes(self):
+        # Fields containing '"' are quoted and their quotes doubled.
+        self.writerAssertEqual([[1, 2, 'a"bc"', 3, 4]],
+                               b'1,2,"a""bc""",3,4\r\n')
+
+    def test_quote_fieldsep(self):
+        # Fields containing the delimiter get quoted.
+        self.writerAssertEqual([['abc,def']],
+                               b'"abc,def"\r\n')
+
+    def test_newlines(self):
+        # Fields containing a newline get quoted.
+        self.writerAssertEqual([[1, 2, 'a\nbc', 3, 4]],
+                               b'1,2,"a\nbc",3,4\r\n')
+
+
+class EscapedExcel(csv.excel):
+    # Excel variant that never quotes: special characters are escaped
+    # with a backslash instead (QUOTE_NONE + escapechar).
+    quoting = csv.QUOTE_NONE
+    escapechar = '\\'
+
+
+class TestEscapedExcel(TestCsvBase):
+    # Round-trips for the escape-based (unquoted) dialect above.
+    dialect = EscapedExcel()
+
+    def test_escape_fieldsep(self):
+        # With QUOTE_NONE the delimiter is backslash-escaped, not quoted.
+        self.writerAssertEqual([['abc,def']], b'abc\\,def\r\n')
+
+    def test_read_escape_fieldsep(self):
+        # An escaped delimiter reads back as a literal comma in one field.
+        self.readerAssertEqual(b'abc\\,def\r\n', [['abc,def']])
+
+
+class QuotedEscapedExcel(csv.excel):
+    # Excel variant that quotes all non-numeric fields and also defines
+    # an escapechar for use when reading escaped input.
+    quoting = csv.QUOTE_NONNUMERIC
+    escapechar = '\\'
+
+
+class TestQuotedEscapedExcel(TestCsvBase):
+    # Round-trips for the quoted-plus-escapechar dialect above.
+    dialect = QuotedEscapedExcel()
+
+    def test_write_escape_fieldsep(self):
+        # QUOTE_NONNUMERIC quotes the string field; no escaping needed.
+        self.writerAssertEqual([['abc,def']], b'"abc,def"\r\n')
+
+    def test_read_escape_fieldsep(self):
+        # The escapechar is honoured even inside a quoted field.
+        self.readerAssertEqual(b'"abc\\,def"\r\n', [['abc,def']])
+
+
+class TestDictFields(unittest.TestCase):
+    # DictReader / DictWriter behaviour, in particular how fieldnames
+    # interact with rows of a different length:
+    # "long" means the row is longer than the number of fieldnames
+    # "short" means there are fewer elements in the row than fieldnames
+    def test_write_simple_dict(self):
+        # writeheader() emits the fieldnames row; missing keys in a row
+        # dict are written as empty fields.
+        # NOTE(review): the fd returned by mkstemp is never closed here
+        # (the file is re-opened by name) — sibling tests use
+        # os.fdopen(fd, ...) instead; confirm whether the leak is intended.
+        fd, name = tempfile.mkstemp()
+        fileobj = open(name, 'w+b')
+        try:
+            writer = csv.DictWriter(fileobj, fieldnames=["f1", "f2", "f3"])
+            writer.writeheader()
+            fileobj.seek(0)
+            self.assertEqual(fileobj.readline(), b"f1,f2,f3\r\n")
+            writer.writerow({"f1": 10, "f3": "abc"})
+            fileobj.seek(0)
+            fileobj.readline() # header
+            self.assertEqual(fileobj.read(), b"10,,abc\r\n")
+        finally:
+            fileobj.close()
+            os.unlink(name)
+
+    def test_write_unicode_header_dict(self):
+        # Non-ASCII fieldnames are encoded (UTF-8 by default) in the header.
+        # NOTE(review): same unclosed mkstemp fd as in test_write_simple_dict.
+        fd, name = tempfile.mkstemp()
+        fileobj = open(name, 'w+b')
+        try:
+            writer = csv.DictWriter(fileobj, fieldnames=[u"ñ", u"ö"])
+            writer.writeheader()
+            fileobj.seek(0)
+            self.assertEqual(fileobj.readline().decode('utf-8'), u"ñ,ö\r\n")
+        finally:
+            fileobj.close()
+            os.unlink(name)
+
+    def test_write_no_fields(self):
+        # DictWriter requires the fieldnames argument.
+        fileobj = BytesIO()
+        self.assertRaises(TypeError, csv.DictWriter, fileobj)
+
+    def test_read_dict_fields(self):
+        # Explicit fieldnames map positionally onto each row.
+        fd, name = tempfile.mkstemp()
+        fileobj = os.fdopen(fd, "w+b")
+        try:
+            fileobj.write(b"1,2,abc\r\n")
+            fileobj.seek(0)
+            reader = csv.DictReader(fileobj,
+                                    fieldnames=["f1", "f2", "f3"])
+            self.assertEqual(next(reader),
+                             {"f1": '1', "f2": '2', "f3": 'abc'})
+        finally:
+            fileobj.close()
+            os.unlink(name)
+
+    def test_read_dict_no_fieldnames(self):
+        # Without fieldnames, the first row of the file supplies them.
+        fd, name = tempfile.mkstemp()
+        fileobj = os.fdopen(fd, "w+b")
+        try:
+            fileobj.write(b"f1,f2,f3\r\n1,2,abc\r\n")
+            fileobj.seek(0)
+            reader = csv.DictReader(fileobj)
+            self.assertEqual(reader.fieldnames,
+                             ["f1", "f2", "f3"])
+            self.assertEqual(next(reader),
+                             {"f1": '1', "f2": '2', "f3": 'abc'})
+        finally:
+            fileobj.close()
+            os.unlink(name)
+
+    # Two test cases to make sure existing ways of implicitly setting
+    # fieldnames continue to work. Both arise from discussion in issue3436.
+    def test_read_dict_fieldnames_from_file(self):
+        # fieldnames may be read explicitly via a plain csv.reader on the
+        # same file object before handing it to DictReader.
+        fd, name = tempfile.mkstemp()
+        f = os.fdopen(fd, "w+b")
+        try:
+            f.write(b"f1,f2,f3\r\n1,2,abc\r\n")
+            f.seek(0)
+            reader = csv.DictReader(f, fieldnames=next(csv.reader(f)))
+            self.assertEqual(reader.fieldnames,
+                             ["f1", "f2", "f3"])
+            self.assertEqual(next(reader),
+                             {"f1": '1', "f2": '2', "f3": 'abc'})
+        finally:
+            f.close()
+            os.unlink(name)
+
+    def test_read_dict_fieldnames_chain(self):
+        # reader.fieldnames must stay stable while iterating, including
+        # when the first row was consumed before chaining.
+        import itertools
+        fd, name = tempfile.mkstemp()
+        f = os.fdopen(fd, "w+b")
+        try:
+            f.write(b"f1,f2,f3\r\n1,2,abc\r\n")
+            f.seek(0)
+            reader = csv.DictReader(f)
+            first = next(reader)
+            for row in itertools.chain([first], reader):
+                self.assertEqual(reader.fieldnames, ["f1", "f2", "f3"])
+                self.assertEqual(row, {"f1": '1', "f2": '2', "f3": 'abc'})
+        finally:
+            f.close()
+            os.unlink(name)
+
+    def test_read_long(self):
+        # Extra row values beyond the fieldnames collect in a list under
+        # the default restkey, None.
+        fd, name = tempfile.mkstemp()
+        fileobj = os.fdopen(fd, "w+b")
+        try:
+            fileobj.write(b"1,2,abc,4,5,6\r\n")
+            fileobj.seek(0)
+            reader = csv.DictReader(fileobj,
+                                    fieldnames=["f1", "f2"])
+            self.assertEqual(next(reader), {"f1": '1', "f2": '2',
+                                            None: ["abc", "4", "5", "6"]})
+        finally:
+            fileobj.close()
+            os.unlink(name)
+
+    def test_read_long_with_rest(self):
+        # A custom restkey replaces None for the overflow list.
+        fd, name = tempfile.mkstemp()
+        fileobj = os.fdopen(fd, "w+b")
+        try:
+            fileobj.write(b"1,2,abc,4,5,6\r\n")
+            fileobj.seek(0)
+            reader = csv.DictReader(fileobj,
+                                    fieldnames=["f1", "f2"], restkey="_rest")
+            self.assertEqual(next(reader), {"f1": '1', "f2": '2',
+                                            "_rest": ["abc", "4", "5", "6"]})
+        finally:
+            fileobj.close()
+            os.unlink(name)
+
+    def test_read_long_with_rest_no_fieldnames(self):
+        # restkey also works when fieldnames come from the header row.
+        fd, name = tempfile.mkstemp()
+        fileobj = os.fdopen(fd, "w+b")
+        try:
+            fileobj.write(b"f1,f2\r\n1,2,abc,4,5,6\r\n")
+            fileobj.seek(0)
+            reader = csv.DictReader(fileobj, restkey="_rest")
+            self.assertEqual(reader.fieldnames, ["f1", "f2"])
+            self.assertEqual(next(reader), {"f1": '1', "f2": '2',
+                                            "_rest": ["abc", "4", "5", "6"]})
+        finally:
+            fileobj.close()
+            os.unlink(name)
+
+    def test_read_short(self):
+        # Missing trailing values are filled in with restval.
+        fd, name = tempfile.mkstemp()
+        fileobj = os.fdopen(fd, "w+b")
+        try:
+            fileobj.write(b"1,2,abc,4,5,6\r\n1,2,abc\r\n")
+            fileobj.seek(0)
+            reader = csv.DictReader(fileobj,
+                                    fieldnames="1 2 3 4 5 6".split(),
+                                    restval="DEFAULT")
+            self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
+                                            "4": '4', "5": '5', "6": '6'})
+            self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
+                                            "4": 'DEFAULT', "5": 'DEFAULT',
+                                            "6": 'DEFAULT'})
+        finally:
+            fileobj.close()
+            os.unlink(name)
+
+    def test_read_multi(self):
+        # DictReader accepts any iterable of lines, not just file objects.
+        sample = [
+            b'2147483648,43.0e12,17,abc,def\r\n',
+            b'147483648,43.0e2,17,abc,def\r\n',
+            b'47483648,43.0,170,abc,def\r\n'
+            ]
+
+        reader = csv.DictReader(sample,
+                                fieldnames="i1 float i2 s1 s2".split())
+        self.assertEqual(next(reader), {"i1": '2147483648',
+                                        "float": '43.0e12',
+                                        "i2": '17',
+                                        "s1": 'abc',
+                                        "s2": 'def'})
+
+    def test_read_with_blanks(self):
+        # Blank lines between records are skipped entirely: both yielded
+        # rows are the full data rows.
+        reader = csv.DictReader([b"1,2,abc,4,5,6\r\n", b"\r\n",
+                                 b"1,2,abc,4,5,6\r\n"],
+                                fieldnames="1 2 3 4 5 6".split())
+        self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
+                                        "4": '4', "5": '5', "6": '6'})
+        self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
+                                        "4": '4', "5": '5', "6": '6'})
+
+    def test_read_semi_sep(self):
+        # Extra format keywords (delimiter) pass through to the underlying
+        # reader.
+        reader = csv.DictReader([b"1;2;abc;4;5;6\r\n"],
+                                fieldnames="1 2 3 4 5 6".split(),
+                                delimiter=';')
+        self.assertEqual(next(reader), {"1": '1', "2": '2', "3": 'abc',
+                                        "4": '4', "5": '5', "6": '6'})
+
+    def test_empty_file(self):
+        # Constructing a DictReader on an empty stream must not raise.
+        csv.DictReader(BytesIO())
+
+class TestArrayWrites(unittest.TestCase):
+    # csv.writer must accept array.array rows (any iterable of values),
+    # stringifying each element the same way str()/repr() would.
+    def test_int_write(self):
+        contents = [(20-i) for i in range(20)]
+        a = array.array('i', contents)
+
+        fd, name = tempfile.mkstemp()
+        fileobj = os.fdopen(fd, "w+b")
+        try:
+            writer = csv.writer(fileobj, dialect="excel")
+            writer.writerow(a)
+            # Expected bytes: the same ints joined by commas.
+            expected = b",".join([str(i).encode('utf-8') for i in a])+b"\r\n"
+            fileobj.seek(0)
+            self.assertEqual(fileobj.read(), expected)
+        finally:
+            fileobj.close()
+            os.unlink(name)
+
+    def test_double_write(self):
+        contents = [(20-i)*0.1 for i in range(20)]
+        a = array.array('d', contents)
+        fd, name = tempfile.mkstemp()
+        fileobj = os.fdopen(fd, "w+b")
+        try:
+            writer = csv.writer(fileobj, dialect="excel")
+            writer.writerow(a)
+            # On 2.7.3+ the expected text is built with repr() instead of
+            # str() — presumably tracking a float-formatting change in that
+            # release so the expectation matches what the writer emits.
+            float_repr = str
+            if sys.version_info >= (2, 7, 3):
+                float_repr = repr
+            expected = b",".join([float_repr(i).encode('utf-8') for i in a])+b"\r\n"
+            fileobj.seek(0)
+            self.assertEqual(fileobj.read(), expected)
+        finally:
+            fileobj.close()
+            os.unlink(name)
+
+    def test_float_write(self):
+        # Same as test_double_write but with single-precision ('f') values.
+        contents = [(20-i)*0.1 for i in range(20)]
+        a = array.array('f', contents)
+        fd, name = tempfile.mkstemp()
+        fileobj = os.fdopen(fd, "w+b")
+        try:
+            writer = csv.writer(fileobj, dialect="excel")
+            writer.writerow(a)
+            float_repr = str
+            if sys.version_info >= (2, 7, 3):
+                float_repr = repr
+            expected = b",".join([float_repr(i).encode('utf-8') for i in a])+b"\r\n"
+            fileobj.seek(0)
+            self.assertEqual(fileobj.read(), expected)
+        finally:
+            fileobj.close()
+            os.unlink(name)
+
+    def test_char_write(self):
+        # A string row is iterated character by character, one field each.
+        a = string.ascii_letters
+        fd, name = tempfile.mkstemp()
+        fileobj = os.fdopen(fd, "w+b")
+        try:
+            writer = csv.writer(fileobj, dialect="excel")
+            writer.writerow(a)
+            expected = ",".join(a).encode('utf-8')+b"\r\n"
+            fileobj.seek(0)
+            self.assertEqual(fileobj.read(), expected)
+        finally:
+            fileobj.close()
+            os.unlink(name)
+
+
+class TestUnicode(unittest.TestCase):
+    # Reading non-ASCII data: the reader's `encoding` argument must decode
+    # the underlying byte stream into unicode field values.
+    def test_unicode_read(self):
+        # EncodedFile wraps an iso-8859-1 byte stream for the reader.
+        f = EncodedFile(BytesIO((u"Martin von Löwis,"
+                                 u"Marc André Lemburg,"
+                                 u"Guido van Rossum,"
+                                 u"François Pinard\r\n").encode('iso-8859-1')),
+                        data_encoding='iso-8859-1')
+        reader = csv.reader(f, encoding='iso-8859-1')
+        self.assertEqual(list(reader), [[u"Martin von Löwis",
+                                         u"Marc André Lemburg",
+                                         u"Guido van Rossum",
+                                         u"François Pinard"]])
+
+
+class TestUnicodeErrors(unittest.TestCase):
+    def test_encode_error(self):
+        # The writer's `errors` mode is applied when encoding output:
+        # chr(2603) is not representable in cp1252, so xmlcharrefreplace
+        # must emit the numeric character reference instead of raising.
+        fd = BytesIO()
+        writer = csv.writer(fd, encoding='cp1252', errors='xmlcharrefreplace')
+        writer.writerow(['hello', chr(2603)])
+        self.assertEqual(fd.getvalue(), b'hello,&#2603;\r\n')
+
+    def test_encode_error_dictwriter(self):
+        # Same error-handling contract as test_encode_error, but through
+        # DictWriter: the `errors` mode must propagate to the encoder.
+        fd = BytesIO()
+        dw = csv.DictWriter(fd, ['col1'],
+                            encoding='cp1252', errors='xmlcharrefreplace')
+        dw.writerow({'col1': chr(2604)})
+        self.assertEqual(fd.getvalue(), b'&#2604;\r\n')
+
+    def test_decode_error(self):
+        """Make sure the specified error-handling mode is obeyed on readers."""
+        # 'ö' is invalid ASCII; errors='ignore' must drop it rather than
+        # raise, yielding 'Lwis'.
+        file = EncodedFile(BytesIO(u'Löwis,2,3'.encode('iso-8859-1')),
+                           data_encoding='iso-8859-1')
+        reader = csv.reader(file, encoding='ascii', errors='ignore')
+        self.assertEqual(list(reader)[0][0], 'Lwis')
+
+    def test_decode_error_dictreader(self):
+        """Make sure the error-handling mode is obeyed on DictReaders."""
+        # Same check as test_decode_error, routed through DictReader.
+        file = EncodedFile(BytesIO(u'name,height,weight\nLöwis,2,3'.encode('iso-8859-1')),
+                           data_encoding='iso-8859-1')
+        reader = csv.DictReader(file, encoding='ascii', errors='ignore')
+        self.assertEqual(list(reader)[0]['name'], 'Lwis')