--- a/rows/plugins/plugin_csv.py
+++ b/rows/plugins/plugin_csv.py
@@ -15,33 +15,94 @@
 #    You should have received a copy of the GNU Lesser General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from __future__ import unicode_literals
-
+import csv
+
 from functools import lru_cache
 from io import BytesIO, StringIO
 from itertools import islice
 
-import six
-import unicodecsv
+
+# Start of code vendored from unicodecsv's py3.py so the external dependency
+# can be dropped (source: /usr/lib/python3/dist-packages/unicodecsv/py3.py)
+class _UnicodeWriteWrapper(object):
+    """Simple write() wrapper that converts unicode to bytes."""
+
+    def __init__(self, binary, encoding, errors):
+        self.binary = binary
+        self.encoding = encoding
+        self.errors = errors
+
+    def write(self, string):
+        return self.binary.write(string.encode(self.encoding, self.errors))
+
+
+class UnicodeWriter(object):
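+    """CSV writer that encodes unicode rows and writes them to a binary file.
+
+    Accepts the same dialect and formatting parameters as csv.writer.
+    """
+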
+    def __init__(self, f, dialect=csv.excel, encoding='utf-8', errors='strict',
+                 *args, **kwds):
+        if f is None:
+            raise TypeError("a file object is required, got None")
+
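+        # Wrap the binary file so that every str produced by csv.writer is
+        # encoded to bytes before being written out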
+        f = _UnicodeWriteWrapper(f, encoding=encoding, errors=errors)
+        self.writer = csv.writer(f, dialect, *args, **kwds)
+
+    def writerow(self, row):
+        return self.writer.writerow(row)
+
+    def writerows(self, rows):
+        return self.writer.writerows(rows)
+
+    @property
+    def dialect(self):
+        return self.writer.dialect
+
+
+class UnicodeReader(object):
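+    """CSV reader that decodes a binary file object into unicode rows.
+
+    Accepts the same dialect and formatting parameters as csv.reader.
+    """
+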
+    def __init__(self, f, dialect=None, encoding='utf-8', errors='strict',
+                 **kwds):
+        format_params = ['delimiter', 'doublequote', 'escapechar',
+                         'lineterminator', 'quotechar', 'quoting',
+                         'skipinitialspace']
+
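+        # Fall back to the default 'excel' dialect only when the caller
+        # passed neither a dialect nor any formatting parameter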
+        if dialect is None:
+            if not any(kwd_name in format_params for kwd_name in kwds):
+                dialect = csv.excel
+
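+        # Lazily decode each line of bytes so csv.reader always receives str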
+        f = (bs.decode(encoding, errors=errors) for bs in f)
+        self.reader = csv.reader(f, dialect, **kwds)
+
+    def __next__(self):
+        return self.reader.__next__()
+
+    def __iter__(self):
+        return self
+
+    @property
+    def dialect(self):
+        return self.reader.dialect
+
+    @property
+    def line_num(self):
+        return self.reader.line_num
+# End of code vendored from unicodecsv's py3.py
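+# Illustrative round trip with the wrappers above (a sketch, not part of the
+# upstream code; the file name is hypothetical):
+#
+#     with open("cities.csv", "wb") as fobj:
+#         writer = UnicodeWriter(fobj, encoding="utf-8")
+#         writer.writerow(["name", "state"])
+#         writer.writerow(["São Paulo", "SP"])
+#     with open("cities.csv", "rb") as fobj:
+#         rows = list(UnicodeReader(fobj, encoding="utf-8"))  # decoded rows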
+
 
 from rows import fields
 from rows.fields import make_header
 from rows.plugins.utils import create_table, ipartition, serialize
 from rows.utils import Source, detect_local_source, open_compressed
 
-sniffer = unicodecsv.Sniffer()
+sniffer = csv.Sniffer()
 # Some CSV files have more than 128kB of data in a cell, so we force this value
 # to be greater (16MB).
 # TODO: check if it impacts in memory usage.
 # TODO: may add option to change it by passing a parameter to import/export.
-unicodecsv.field_size_limit(16777216)
+csv.field_size_limit(16777216)
 
 
 def fix_dialect(dialect):
     if not dialect.doublequote and dialect.escapechar is None:
         dialect.doublequote = True
 
-    if dialect.quoting == unicodecsv.QUOTE_MINIMAL and dialect.quotechar == "'":
+    if dialect.quoting == csv.QUOTE_MINIMAL and dialect.quotechar == "'":
         # Python csv's Sniffer seems to detect a wrong quotechar when
         # quoting is minimal
         dialect.quotechar = '"'
@@ -96,31 +157,14 @@
     }
 
 
-class excel_semicolon(unicodecsv.excel):
+class excel_semicolon(csv.excel):
     delimiter = ";"
 
 
-unicodecsv.register_dialect("excel-semicolon", excel_semicolon)
-
-
-if six.PY2:
-
-    def discover_dialect(sample, encoding=None, delimiters=(b",", b";", b"\t", b"|")):
-        """Discover a CSV dialect based on a sample size.
-
-        `encoding` is not used (Python 2)
-        """
-        try:
-            dialect = sniffer.sniff(sample, delimiters=delimiters)
-
-        except unicodecsv.Error:  # Couldn't detect: fall back to 'excel'
-            dialect = unicodecsv.excel
-
-        fix_dialect(dialect)
-        return dialect
+csv.register_dialect("excel-semicolon", excel_semicolon)
 
 
-elif six.PY3:
+if True:  # TODO: dedent this Python 3-only block (kept indented to minimize the diff)
 
     def discover_dialect(sample, encoding, delimiters=(",", ";", "\t", "|")):
         """Discover a CSV dialect based on a sample size.
@@ -149,8 +193,8 @@
         try:
             dialect = sniffer.sniff(decoded, delimiters=delimiters)
 
-        except unicodecsv.Error:  # Couldn't detect: fall back to 'excel'
-            dialect = unicodecsv.excel
+        except csv.Error:  # Couldn't detect: fall back to 'excel'
+            dialect = csv.excel
 
         fix_dialect(dialect)
         return dialect
@@ -186,7 +230,7 @@
             sample=read_sample(source.fobj, sample_size), encoding=source.encoding
         )
 
-    reader = unicodecsv.reader(source.fobj, encoding=encoding, dialect=dialect)
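+    # Use the vendored UnicodeReader, which takes a binary file object and
+    # yields decoded rows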
+    reader = UnicodeReader(source.fobj, encoding=encoding, dialect=dialect)
 
     meta = {"imported_from": "csv", "source": source}
     return create_table(reader, meta=meta, *args, **kwargs)
@@ -196,7 +240,7 @@
     table,
     filename_or_fobj=None,
     encoding="utf-8",
-    dialect=unicodecsv.excel,
+    dialect=csv.excel,
     batch_size=100,
     callback=None,
     *args,
@@ -229,7 +273,7 @@
     # TODO: may use `io.BufferedWriter` instead of `ipartition` so user can
     # choose the real size (in Bytes) when to flush to the file system, instead
     # number of rows
-    writer = unicodecsv.writer(source.fobj, encoding=encoding, dialect=dialect)
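+    # Use the vendored UnicodeWriter, which encodes each serialized row and
+    # writes the resulting bytes to the binary file object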
+    writer = UnicodeWriter(source.fobj, encoding=encoding, dialect=dialect)
 
     if callback is None:
         for batch in ipartition(serialize(table, *args, **kwargs), batch_size):
@@ -264,8 +308,8 @@
         self._encoding = encoding
         self._field_names = None
         self._dialect = dialect
-        if isinstance(dialect, six.text_type):
-            self._dialect = unicodecsv.get_dialect(dialect)
+        if isinstance(dialect, str):
+            self._dialect = csv.get_dialect(dialect)
         self._schema = schema
         self._chunk_size = chunk_size
         self._sample_binary = self._sample_unicode = None
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,6 @@
 utils_requirements = ["requests", "requests-cache", "tqdm"]
 EXTRA_REQUIREMENTS = {
     "cli": ["click"] + utils_requirements,
-    "csv": ["unicodecsv"],
     "detect": ["file-magic"],
     "html": ["lxml"],  # apt: libxslt-dev libxml2-dev
     "ods": ["lxml"],
@@ -49,7 +48,7 @@
     "dataclasses",    
     "six",
     "requests",
-] + EXTRA_REQUIREMENTS["csv"]
+]
 LONG_DESCRIPTION = """
 No matter in which format your tabular data is: rows will import it,
 automatically detect types and give you high-level Python objects so you can
