#!/usr/bin/env python
# Copyright (C) 2012-2019 Harel Ben-Attia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details (doc/LICENSE contains
# a copy of it)
#
#
# Name : q (With respect to The Q Continuum)
# Author : Harel Ben Attia - [email protected], harelba @ github, @harelba on twitter
# Requires : python with sqlite3 (standard in python>=2.6)
#
#
# q allows performing SQL-like statements on tabular text data.
#
# Its purpose is to bring SQL expressive power to manipulating text data using the Linux command line.
#
# Full documentation and details at http://harelba.github.io/q/
#
# Run with --help for command line details
#
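#
# A minimal usage sketch (the file and column names below are hypothetical, for
# illustration only):
#
#   q -d , -H "SELECT city, COUNT(*) FROM sales.csv GROUP BY city"
#
# -d sets the input delimiter and -H declares that the first input line is a header,
# allowing columns to be referenced by name instead of c1..cN.
#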
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __version__ import q_version
__all__ = [ 'QTextAsData' ]
import os
import sys
import sqlite3
import gzip
import glob
from optparse import OptionParser,OptionGroup
import traceback as tb
import codecs
import locale
import time
import re
from six.moves import configparser, range, filter
import traceback
import csv
import hashlib
import uuid
import math
import six
import io
if six.PY3:
long = int
unicode = six.text_type
DEBUG = False
def get_stdout_encoding(encoding_override=None):
if encoding_override is not None and encoding_override != 'none':
return encoding_override
if sys.stdout.isatty():
return sys.stdout.encoding
else:
return locale.getpreferredencoding()
SHOW_SQL = False
def sha1(data):
    if not isinstance(data, (str, unicode)):
        data = str(data)
    # hashlib requires bytes - encode text input before hashing (python 3's str is unicode)
    if isinstance(data, unicode):
        data = data.encode('utf-8')
    return hashlib.sha1(data).hexdigest()
def regexp(regular_expression, data):
if data is not None:
if not isinstance(data, str) and not isinstance(data, unicode):
data = str(data)
return re.search(regular_expression, data) is not None
else:
return False
class Sqlite3DBResults(object):
def __init__(self,query_column_names,results):
self.query_column_names = query_column_names
self.results = results
def percentile(l, p):
# TODO Alpha implementation, need to provide multiple interpolation methods, and add tests
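    # Linear interpolation between the two closest ranks. For example, with a sorted
    # l=[10,20,30,40] and p=0.5: k=1.5, f=1, c=2, yielding (2-1.5)*20 + (1.5-1)*30 = 25.0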
if not l:
return None
k = p*(len(l) - 1)
f = math.floor(k)
c = math.ceil(k)
if c == f:
return l[int(k)]
return (c-k) * l[int(f)] + (k-f) * l[int(c)]
class StrictPercentile(object):
def __init__(self):
self.values = []
self.p = None
def step(self,value,p):
if self.p is None:
self.p = p
self.values.append(value)
def finalize(self):
if len(self.values) == 0 or (self.p < 0 or self.p > 1):
return None
else:
return percentile(sorted(self.values),self.p)
class Sqlite3DB(object):
def __init__(self, show_sql=SHOW_SQL):
self.show_sql = show_sql
self.conn = sqlite3.connect(':memory:')
self.last_temp_table_id = 10000
self.cursor = self.conn.cursor()
        self.type_names = {
            str: 'TEXT', int: 'INT', long: 'INT', float: 'FLOAT', None: 'TEXT'}
self.numeric_column_types = set([int, long, float])
self.add_user_functions()
def done(self):
self.conn.commit()
def store_db_to_disk_standard(self,sqlite_db_filename,table_names_mapping):
new_db = sqlite3.connect(sqlite_db_filename,isolation_level=None)
c = new_db.cursor()
for s in self.conn.iterdump():
c.execute(s)
results = c.fetchall()
for source_filename_str,tn in six.iteritems(table_names_mapping):
c.execute('alter table `%s` rename to `%s`' % (tn, source_filename_str))
new_db.close()
def store_db_to_disk_fast(self,sqlite_db_filename,table_names_mapping):
try:
import sqlitebck
except ImportError as e:
            msg = "sqlitebck python module cannot be found - fast store to disk cannot be performed. Note that for now, sqlitebck is not packaged as part of q. In order to use the fast method, you need to manually `pip install sqlitebck` into your python environment. We obviously consider this a bug, and it will be fixed once proper packaging is done, making the fast method the standard one."
raise MissingSqliteBckModuleException(msg)
new_db = sqlite3.connect(sqlite_db_filename)
sqlitebck.copy(self.conn,new_db)
c = new_db.cursor()
        for source_filename_str,tn in six.iteritems(table_names_mapping):
c.execute('alter table `%s` rename to `%s`' % (tn, source_filename_str))
new_db.close()
def store_db_to_disk(self,sqlite_db_filename,table_names_mapping,method='standard'):
if method == 'standard':
self.store_db_to_disk_standard(sqlite_db_filename,table_names_mapping)
elif method == 'fast':
self.store_db_to_disk_fast(sqlite_db_filename,table_names_mapping)
else:
raise ValueError('Unknown store-db-to-disk method %s' % method)
def add_user_functions(self):
self.conn.create_function("regexp", 2, regexp)
self.conn.create_function("sha1", 1, sha1)
self.conn.create_aggregate("percentile",2,StrictPercentile)
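        # These become usable inside queries, e.g. (hypothetical column names):
        #   SELECT sha1(c1) FROM ... WHERE c2 REGEXP '^x'
        #   SELECT percentile(c1, 0.9) FROM ...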
def is_numeric_type(self, column_type):
return column_type in self.numeric_column_types
def update_many(self, sql, params):
try:
if self.show_sql:
print(sql, " params: " + str(params))
self.cursor.executemany(sql, params)
finally:
pass # cursor.close()
def execute_and_fetch(self, q):
try:
if self.show_sql:
print(repr(q))
self.cursor.execute(q)
if self.cursor.description is not None:
# we decode the column names, so they can be encoded to any output format later on
if six.PY2:
query_column_names = [unicode(c[0],'utf-8') for c in self.cursor.description]
else:
query_column_names = [c[0] for c in self.cursor.description]
else:
query_column_names = None
result = self.cursor.fetchall()
finally:
pass # cursor.close()
return Sqlite3DBResults(query_column_names,result)
def _get_as_list_str(self, l):
return ",".join(['"%s"' % x.replace('"', '""') for x in l])
def _get_col_values_as_list_str(self, col_vals, col_types):
result = []
for col_val, col_type in zip(col_vals, col_types):
if col_val == '' and col_type is not str:
col_val = "null"
else:
if col_val is not None:
if "'" in col_val:
col_val = col_val.replace("'", "''")
col_val = "'" + col_val + "'"
else:
col_val = "null"
result.append(col_val)
return ",".join(result)
def generate_insert_row(self, table_name, column_names):
col_names_str = self._get_as_list_str(column_names)
question_marks = ", ".join(["?" for i in range(0, len(column_names))])
return 'INSERT INTO %s (%s) VALUES (%s)' % (table_name, col_names_str, question_marks)
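    # For example, generate_insert_row('some_table', ['a', 'b']) returns the
    # parameterized statement 'INSERT INTO some_table ("a","b") VALUES (?, ?)'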
def generate_begin_transaction(self):
return "BEGIN TRANSACTION"
def generate_end_transaction(self):
return "COMMIT"
    # Accepts a list of column names so that column order is preserved (an OrderedDict
    # could have been used instead, but that would have required python 2.7)
def generate_create_table(self, table_name, column_names, column_dict):
# Convert dict from python types to db types
column_name_to_db_type = dict(
(n, self.type_names[t]) for n, t in six.iteritems(column_dict))
column_defs = ','.join(['"%s" %s' % (
n.replace('"', '""'), column_name_to_db_type[n]) for n in column_names])
return 'CREATE TABLE %s (%s)' % (table_name, column_defs)
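    # For example, with column_names ['a', 'b'] and column_dict {'a': str, 'b': int},
    # this returns 'CREATE TABLE <table_name> ("a" TEXT,"b" INT)'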
def generate_temp_table_name(self):
self.last_temp_table_id += 1
tn = "temp_table_%s" % self.last_temp_table_id
return tn
def generate_drop_table(self, table_name):
return "DROP TABLE %s" % table_name
def drop_table(self, table_name):
return self.execute_and_fetch(self.generate_drop_table(table_name))
class CouldNotConvertStringToNumericValueException(Exception):
def __init__(self, msg):
self.msg = msg
    def __str__(self):
return repr(self.msg)
class ColumnMaxLengthLimitExceededException(Exception):
def __init__(self, msg):
self.msg = msg
    def __str__(self):
return repr(self.msg)
class MissingSqliteBckModuleException(Exception):
def __init__(self, msg):
self.msg = msg
    def __str__(self):
return repr(self.msg)
class CouldNotParseInputException(Exception):
def __init__(self, msg):
self.msg = msg
    def __str__(self):
return repr(self.msg)
class BadHeaderException(Exception):
def __init__(self, msg):
self.msg = msg
    def __str__(self):
return repr(self.msg)
class EncodedQueryException(Exception):
def __init__(self, msg):
self.msg = msg
    def __str__(self):
return repr(self.msg)
class CannotUnzipStdInException(Exception):
def __init__(self):
pass
class UniversalNewlinesExistException(Exception):
def __init__(self):
pass
class UnprovidedStdInException(Exception):
def __init__(self):
pass
class EmptyDataException(Exception):
def __init__(self):
pass
class MissingHeaderException(Exception):
def __init__(self,msg):
self.msg = msg
class FileNotFoundException(Exception):
def __init__(self, msg):
self.msg = msg
    def __str__(self):
return repr(self.msg)
class ColumnCountMismatchException(Exception):
def __init__(self, msg):
self.msg = msg
    def __str__(self):
return repr(self.msg)
class StrictModeColumnCountMismatchException(Exception):
def __init__(self,expected_col_count,actual_col_count):
self.expected_col_count = expected_col_count
self.actual_col_count = actual_col_count
class FluffyModeColumnCountMismatchException(Exception):
def __init__(self,expected_col_count,actual_col_count):
self.expected_col_count = expected_col_count
self.actual_col_count = actual_col_count
# Simplistic SQL "parsing" class... We'll eventually require a real SQL parser which will provide us with a parse tree
#
# A "qtable" is a filename which behaves like an SQL table...
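# For example, in "SELECT c1 FROM ./my-data/*.csv" the glob pattern acts as the qtable,
# and is materialized into an actual sqlite table before the query is executed.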
class Sql(object):
def __init__(self, sql):
# Currently supports only standard SELECT statements
# Holds original SQL
self.sql = sql
# Holds sql parts
self.sql_parts = sql.split()
# Set of qtable names
self.qtable_names = set()
# Dict from qtable names to their positions in sql_parts. Value here is a *list* of positions,
# since it is possible that the same qtable_name (file) is referenced in multiple positions
# and we don't want the database table to be recreated for each
# reference
self.qtable_name_positions = {}
# Dict from qtable names to their effective (actual database) table
# names
self.qtable_name_effective_table_names = {}
self.query_column_names = None
# Go over all sql parts
idx = 0
while idx < len(self.sql_parts):
# Get the part string
part = self.sql_parts[idx]
# If it's a FROM or a JOIN
if part.upper() in ['FROM', 'JOIN']:
# and there is nothing after it,
if idx == len(self.sql_parts) - 1:
# Just fail
raise Exception(
'FROM/JOIN is missing a table name after it')
                # Otherwise, the next part contains the qtable name. In most cases the next part will be only the qtable name.
                # We handle one special case here, where this is a subquery as a column: "SELECT (SELECT ... FROM qtable),100 FROM ...".
                # In that case, there will be an ending parenthesis as part of the name, and we want to handle this case gracefully.
                # This is obviously a hack of a hack :) Just until we have
                # complete parsing capabilities
                qtable_name = self.sql_parts[idx + 1]
if ')' in qtable_name:
leftover = qtable_name[qtable_name.index(')'):]
self.sql_parts.insert(idx + 2, leftover)
qtable_name = qtable_name[:qtable_name.index(')')]
self.sql_parts[idx + 1] = qtable_name
self.qtable_names.add(qtable_name)
if qtable_name not in self.qtable_name_positions.keys():
self.qtable_name_positions[qtable_name] = []
self.qtable_name_positions[qtable_name].append(idx + 1)
idx += 2
else:
idx += 1
def set_effective_table_name(self, qtable_name, effective_table_name):
if qtable_name not in self.qtable_names:
raise Exception("Unknown qtable %s" % qtable_name)
if qtable_name in self.qtable_name_effective_table_names.keys():
raise Exception(
"Already set effective table name for qtable %s" % qtable_name)
self.qtable_name_effective_table_names[
qtable_name] = effective_table_name
def get_effective_sql(self,original_names=False):
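        # Rewrites the query so it refers to the actual database tables. For a
        # (hypothetical) "SELECT * FROM my.csv", once my.csv has been assigned the
        # effective table name temp_table_10001, this returns
        # "SELECT * FROM temp_table_10001"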
        # Make sure every qtable has been assigned an effective table name
        if self.qtable_names != set(self.qtable_name_effective_table_names.keys()):
            raise Exception('There are qtables without effective tables')
effective_sql = [x for x in self.sql_parts]
for qtable_name, positions in six.iteritems(self.qtable_name_positions):
for pos in positions:
if not original_names:
effective_sql[pos] = self.qtable_name_effective_table_names[
qtable_name]
else:
effective_sql[pos] = "`%s`" % qtable_name
return " ".join(effective_sql)
def get_qtable_name_effective_table_names(self):
return self.qtable_name_effective_table_names
def execute_and_fetch(self, db):
db_results_obj = db.execute_and_fetch(self.get_effective_sql())
return db_results_obj
class LineSplitter(object):
def __init__(self, delimiter, expected_column_count):
self.delimiter = delimiter
self.expected_column_count = expected_column_count
if delimiter is not None:
escaped_delimiter = re.escape(delimiter)
self.split_regexp = re.compile('(?:%s)+' % escaped_delimiter)
else:
self.split_regexp = re.compile(r'\s+')
def split(self, line):
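        # Note that consecutive delimiters are collapsed by the '(?:...)+' regexp above -
        # e.g. splitting 'a,,b' with a ',' delimiter yields ['a', 'b'], not ['a', '', 'b']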
if line and line[-1] == '\n':
line = line[:-1]
        # maxsplit is the proper keyword for re.split (it cannot be None, so default to 0 - no limit)
        return self.split_regexp.split(line, maxsplit=self.expected_column_count or 0)
class TableColumnInferer(object):
def __init__(self, mode, expected_column_count, input_delimiter, skip_header=False,disable_column_type_detection=False):
self.inferred = False
self.mode = mode
self.rows = []
self.skip_header = skip_header
self.header_row = None
self.header_row_filename = None
self.expected_column_count = expected_column_count
self.input_delimiter = input_delimiter
self.disable_column_type_detection = disable_column_type_detection
def analyze(self, filename, col_vals):
if self.inferred:
raise Exception("Already inferred columns")
if self.skip_header and self.header_row is None:
self.header_row = col_vals
self.header_row_filename = filename
else:
self.rows.append(col_vals)
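        # Defer the actual inference until 100 sample rows have been collected (or until
        # force_analysis() is called, when the input ends before that)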
if len(self.rows) < 100:
return False
self.do_analysis()
return True
def force_analysis(self):
# This method is called whenever there is no more data, and an analysis needs
# to be performed immediately, regardless of the amount of sample data that has
# been collected
self.do_analysis()
def determine_type_of_value(self, value):
if self.disable_column_type_detection:
return str
if value is not None:
value = value.strip()
if value == '' or value is None:
return None
        try:
            i = int(value)
            if type(i) == long:
                return long
            else:
                return int
        except ValueError:
            pass
        try:
            float(value)
            return float
        except ValueError:
            pass
return str
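    # For example: '3' -> int, '3.5' -> float, 'abc' -> str, '' and None -> None
    # (None means "no type evidence" and is ignored when a column's type is decided)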
def determine_type_of_value_list(self, value_list):
type_list = [self.determine_type_of_value(v) for v in value_list]
all_types = set(type_list)
if len(set(type_list)) == 1:
# all the sample lines are of the same type
return type_list[0]
else:
# check for the number of types without nulls,
type_list_without_nulls = list(filter(
lambda x: x is not None, type_list))
# If all the sample lines are of the same type,
if len(set(type_list_without_nulls)) == 1:
# return it
return type_list_without_nulls[0]
else:
return str
def do_analysis(self):
if self.mode == 'strict':
self._do_strict_analysis()
elif self.mode in ['relaxed', 'fluffy']:
self._do_relaxed_analysis()
else:
raise Exception('Unknown parsing mode %s' % self.mode)
if self.column_count == 1 and self.expected_column_count != 1:
print("Warning: column count is one - did you provide the correct delimiter?", file=sys.stderr)
self.infer_column_types()
self.infer_column_names()
def validate_column_names(self, value_list):
column_name_errors = []
for v in value_list:
if v is None:
# we allow column names to be None, in relaxed mode it'll be filled with default names.
# RLRL
continue
if ',' in v:
column_name_errors.append(
(v, "Column name cannot contain commas"))
continue
if self.input_delimiter in v:
column_name_errors.append(
(v, "Column name cannot contain the input delimiter. Please make sure you've set the correct delimiter"))
continue
if '\n' in v:
column_name_errors.append(
(v, "Column name cannot contain newline"))
continue
if v != v.strip():
column_name_errors.append(
(v, "Column name contains leading/trailing spaces"))
continue
try:
v.encode("utf-8", "strict").decode("utf-8")
            except UnicodeError:
column_name_errors.append(
(v, "Column name must be UTF-8 Compatible"))
continue
# We're checking for column duplication for each field in order to be able to still provide it along with other errors
if len(list(filter(lambda x: x == v,value_list))) > 1:
entry = (v, "Column name is duplicated")
# Don't duplicate the error report itself
if entry not in column_name_errors:
column_name_errors.append(entry)
continue
nul_index = v.find("\x00")
if nul_index >= 0:
column_name_errors.append(
(v, "Column name cannot contain NUL"))
continue
t = self.determine_type_of_value(v)
if t != str:
column_name_errors.append((v, "Column name must be a string"))
return column_name_errors
def infer_column_names(self):
if self.header_row is not None:
column_name_errors = self.validate_column_names(self.header_row)
if len(column_name_errors) > 0:
raise BadHeaderException("Header must contain only strings and not numbers or empty strings: '%s'\n%s" % (
",".join(self.header_row), "\n".join(["'%s': %s" % (x, y) for x, y in column_name_errors])))
# use header row in order to name columns
if len(self.header_row) < self.column_count:
if self.mode == 'strict':
                    raise ColumnCountMismatchException("Strict mode. Header row contains fewer columns than the expected column count (%s vs %s)" % (
len(self.header_row), self.column_count))
elif self.mode in ['relaxed', 'fluffy']:
# in relaxed mode, add columns to fill the missing ones
self.header_row = self.header_row + \
['c%s' % (x + len(self.header_row) + 1)
for x in range(self.column_count - len(self.header_row))]
elif len(self.header_row) > self.column_count:
if self.mode == 'strict':
                    raise ColumnCountMismatchException("Strict mode. Header row contains more columns than the expected column count (%s vs %s)" % (
len(self.header_row), self.column_count))
elif self.mode in ['relaxed', 'fluffy']:
# In relaxed mode, just cut the extra column names
self.header_row = self.header_row[:self.column_count]
self.column_names = self.header_row
else:
# Column names are cX starting from 1
self.column_names = ['c%s' % (i + 1)
for i in range(self.column_count)]
def _do_relaxed_analysis(self):
column_count_list = [len(col_vals) for col_vals in self.rows]
if len(self.rows) == 0:
self.column_count = 0
else:
if self.expected_column_count is not None:
self.column_count = self.expected_column_count
else:
# If not specified, we'll take the largest row in the sample rows
self.column_count = max(column_count_list)
def get_column_count_summary(self, column_count_list):
counts = {}
for column_count in column_count_list:
counts[column_count] = counts.get(column_count, 0) + 1
return six.u(", ").join([six.u("{} rows with {} columns".format(v, k)) for k, v in six.iteritems(counts)])
def _do_strict_analysis(self):
column_count_list = [len(col_vals) for col_vals in self.rows]
if len(set(column_count_list)) != 1:
            raise ColumnCountMismatchException('Strict mode. Column count is expected to be identical across rows, but multiple column counts exist in the first part of the file. Check your delimiter, or change to relaxed mode. Details: %s' % (
self.get_column_count_summary(column_count_list)))
self.column_count = len(self.rows[0])
if self.expected_column_count is not None and self.column_count != self.expected_column_count:
raise ColumnCountMismatchException('Strict mode. Column count is expected to be %s but is %s' % (
self.expected_column_count, self.column_count))
self.infer_column_types()
def infer_column_types(self):
self.column_types = []
self.column_types2 = []
for column_number in range(self.column_count):
column_value_list = [
row[column_number] if column_number < len(row) else None for row in self.rows]
column_type = self.determine_type_of_value_list(column_value_list)
self.column_types.append(column_type)
column_value_list2 = [row[column_number] if column_number < len(
row) else None for row in self.rows[1:]]
column_type2 = self.determine_type_of_value_list(
column_value_list2)
self.column_types2.append(column_type2)
comparison = map(
lambda x: x[0] == x[1], zip(self.column_types, self.column_types2))
if False in comparison and not self.skip_header:
number_of_column_types = len(set(self.column_types))
if number_of_column_types == 1 and list(set(self.column_types))[0] == str:
                print('Warning - There seems to be a header line in the file, but -H has not been specified. All fields will be detected as text fields, and the header line will appear as part of the data', file=sys.stderr)
def get_column_dict(self):
return dict(zip(self.column_names, self.column_types))
def get_column_count(self):
return self.column_count
def get_column_names(self):
return self.column_names
def get_column_types(self):
return self.column_types
def py3_encoded_csv_reader(encoding, f, dialect, is_stdin,**kwargs):
try:
csv_reader = csv.reader(f, dialect, **kwargs)
for row in csv_reader:
yield row
    except ValueError as e:
        # ValueError has no .message attribute in python 3 - use str(e) instead
        if str(e).startswith('could not convert string to'):
            raise CouldNotConvertStringToNumericValueException(str(e))
        else:
            raise CouldNotParseInputException(str(e))
except Exception as e:
if str(e).startswith("field larger than field limit"):
raise ColumnMaxLengthLimitExceededException(str(e))
elif 'universal-newline' in str(e):
raise UniversalNewlinesExistException()
else:
raise
def py2_encoded_csv_reader(encoding, f, dialect, is_stdin, **kwargs):
try:
csv_reader = csv.reader(f, dialect, **kwargs)
if encoding is not None and encoding != 'none':
for row in csv_reader:
yield [unicode(x, encoding) for x in row]
else:
for row in csv_reader:
yield row
except ValueError as e:
if e.message is not None and e.message.startswith('could not convert string to'):
raise CouldNotConvertStringToNumericValueException(e.message)
else:
raise CouldNotParseInputException(str(e))
except Exception as e:
if str(e).startswith("field larger than field limit"):
raise ColumnMaxLengthLimitExceededException(str(e))
elif 'universal-newline' in str(e):
raise UniversalNewlinesExistException()
else:
raise
if six.PY2:
encoded_csv_reader = py2_encoded_csv_reader
else:
encoded_csv_reader = py3_encoded_csv_reader
def normalized_filename(filename):
if filename == '-':
return 'stdin'
else:
return filename
class TableCreatorState(object):
NEW = 'NEW'
INITIALIZED = 'INITIALIZED'
ANALYZED = 'ANALYZED'
FULLY_READ = 'FULLY_READ'
class MaterializedFileState(object):
def __init__(self,filename,f,encoding,dialect,is_stdin):
self.filename = filename
self.lines_read = 0
self.f = f
self.encoding = encoding
self.dialect = dialect
self.is_stdin = is_stdin
self.skipped_bom = False
def read_file_using_csv(self):
# This is a hack for utf-8 with BOM encoding in order to skip the BOM. python's csv module
# has a bug which prevents fixing it using the proper encoding, and it has been encountered by
# multiple people.
if self.encoding == 'utf-8-sig' and self.lines_read == 0 and not self.skipped_bom:
try:
if six.PY2:
BOM = self.f.read(3)
else:
BOM = self.f.buffer.read(3)
if BOM != six.b('\xef\xbb\xbf'):
raise Exception('Value of BOM is not as expected - Value is "%s"' % str(BOM))
except Exception as e:
raise Exception('Tried to skip BOM for "utf-8-sig" encoding and failed. Error message is ' + str(e))
csv_reader = encoded_csv_reader(self.encoding, self.f, is_stdin=self.is_stdin,dialect=self.dialect)
try:
for col_vals in csv_reader:
self.lines_read += 1
yield col_vals
except ColumnMaxLengthLimitExceededException as e:
msg = "Column length is larger than the maximum. Offending file is '%s' - Line is %s, counting from 1 (encoding %s). The line number is the raw line number of the file, ignoring whether there's a header or not" % (self.filename,self.lines_read + 1,self.encoding)
raise ColumnMaxLengthLimitExceededException(msg)
except UniversalNewlinesExistException as e2:
# No need to translate the exception, but we want it to be explicitly defined here for clarity
raise UniversalNewlinesExistException()
def close(self):
if self.f != sys.stdin:
self.f.close()
class TableCreator(object):
def __init__(self, db, filenames_str, line_splitter, skip_header=False, gzipped=False, with_universal_newlines=False, encoding='UTF-8', mode='fluffy', expected_column_count=None, input_delimiter=None,disable_column_type_detection=False,
stdin_file=None,stdin_filename='-'):
self.db = db
self.filenames_str = filenames_str
self.skip_header = skip_header
self.gzipped = gzipped
self.table_created = False
self.line_splitter = line_splitter
self.encoding = encoding
self.mode = mode
self.expected_column_count = expected_column_count
self.input_delimiter = input_delimiter
self.stdin_file = stdin_file
self.stdin_filename = stdin_filename
self.with_universal_newlines = with_universal_newlines
self.column_inferer = TableColumnInferer(
mode, expected_column_count, input_delimiter, skip_header,disable_column_type_detection)
# Filled only after table population since we're inferring the table
# creation data
self.table_name = None
self.pre_creation_rows = []
self.buffered_inserts = []
self.effective_column_names = None
# Column type indices for columns that contain numeric types. Lazily initialized
# so column inferer can do its work before this information is needed
self.numeric_column_indices = None
self.materialized_file_list = self.materialize_file_list()
self.materialized_file_dict = {}
self.state = TableCreatorState.NEW
def materialize_file_list(self):
materialized_file_list = []
# Get the list of filenames
filenames = self.filenames_str.split("+")
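        # e.g. a filenames_str of 'a.csv+data/*.csv' is expanded into the glob matches
        # of 'a.csv' plus the glob matches of 'data/*.csv'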
# for each filename (or pattern)
for fileglob in filenames:
# Allow either stdin or a glob match
if fileglob == self.stdin_filename:
materialized_file_list.append(self.stdin_filename)
else:
materialized_file_list += glob.glob(fileglob)
# If there are no files to go over,
if len(materialized_file_list) == 0:
raise FileNotFoundException(
"No files matching '%s' have been found" % self.filenames_str)
return materialized_file_list
def get_table_name(self):
return self.table_name
def open_file(self,filename):
# TODO Support universal newlines for gzipped and stdin data as well
# Check if it's standard input or a file
if filename == self.stdin_filename:
if self.stdin_file is None:
raise UnprovidedStdInException()
f = self.stdin_file
if self.gzipped:
raise CannotUnzipStdInException()
else:
if self.gzipped or filename.endswith('.gz'):
f = codecs.iterdecode(gzip.GzipFile(fileobj=io.open(filename,'rb')),encoding=self.encoding)
else:
if six.PY3:
if self.with_universal_newlines:
f = io.open(filename, 'rU',newline=None,encoding=self.encoding)
else:
f = io.open(filename, 'r', newline=None, encoding=self.encoding)
else:
if self.with_universal_newlines:
file_opening_mode = 'rbU'
else:
file_opening_mode = 'rb'
f = open(filename, file_opening_mode)
return f
def _pre_populate(self,dialect):
# For each match
for filename in self.materialized_file_list:
if filename in self.materialized_file_dict.keys():
continue
f = self.open_file(filename)
is_stdin = filename == self.stdin_filename
mfs = MaterializedFileState(filename,f,self.encoding,dialect,is_stdin)
self.materialized_file_dict[filename] = mfs
def _should_skip_extra_headers(self, filenumber, filename, mfs, col_vals):
if not self.skip_header:
return False
if filenumber == 0:
return False
header_already_exists = self.column_inferer.header_row is not None
is_extra_header = self.skip_header and mfs.lines_read == 1 and header_already_exists
if is_extra_header:
if tuple(self.column_inferer.header_row) != tuple(col_vals):
raise BadHeaderException("Extra header {} in file {} mismatches original header {} from file {}. Table name is {}".format(",".join(col_vals),mfs.filename,",".join(self.column_inferer.header_row),self.column_inferer.header_row_filename,self.filenames_str))
return is_extra_header
def _populate(self,dialect,stop_after_analysis=False):
total_data_lines_read = 0
# For each match
for filenumber,filename in enumerate(self.materialized_file_list):
mfs = self.materialized_file_dict[filename]
try:
try:
for col_vals in mfs.read_file_using_csv():
if self._should_skip_extra_headers(filenumber,filename,mfs,col_vals):
continue
self._insert_row(filename, col_vals)
if stop_after_analysis and self.column_inferer.inferred:
return
if mfs.lines_read == 0 and self.skip_header:
raise MissingHeaderException("Header line is expected but missing in file %s" % filename)
total_data_lines_read += mfs.lines_read - (1 if self.skip_header else 0)
except StrictModeColumnCountMismatchException as e:
raise ColumnCountMismatchException(
'Strict mode - Expected %s columns instead of %s columns in file %s row %s. Either use relaxed/fluffy modes or check your delimiter' % (
e.expected_col_count, e.actual_col_count, normalized_filename(mfs.filename), mfs.lines_read))
except FluffyModeColumnCountMismatchException as e:
raise ColumnCountMismatchException(
'Deprecated fluffy mode - Too many columns in file %s row %s (%s fields instead of %s fields). Consider moving to either relaxed or strict mode' % (
normalized_filename(mfs.filename), mfs.lines_read, e.actual_col_count, e.expected_col_count))
finally:
if not stop_after_analysis:
mfs.close()
self._flush_inserts()
if not self.table_created:
self.column_inferer.force_analysis()
self._do_create_table(filename)
if total_data_lines_read == 0:
raise EmptyDataException()
def populate(self,dialect,stop_after_analysis=False):
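        # A small state machine: NEW -> INITIALIZED -> ANALYZED -> FULLY_READ. The first
        # _populate() pass reads just enough rows to infer the schema; the second pass
        # then loads the full data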
if self.state == TableCreatorState.NEW:
self._pre_populate(dialect)
self.state = TableCreatorState.INITIALIZED
if self.state == TableCreatorState.INITIALIZED:
self._populate(dialect,stop_after_analysis=True)
self.state = TableCreatorState.ANALYZED
if stop_after_analysis:
return
if self.state == TableCreatorState.ANALYZED:
self._populate(dialect,stop_after_analysis=False)
self.state = TableCreatorState.FULLY_READ
return
def _flush_pre_creation_rows(self, filename):
for i, col_vals in enumerate(self.pre_creation_rows):
if self.skip_header and i == 0:
# skip header line
continue
self._insert_row(filename, col_vals)
self._flush_inserts()
self.pre_creation_rows = []
def _insert_row(self, filename, col_vals):
# If table has not been created yet
if not self.table_created:
# Try to create it along with another "example" line of data
self.try_to_create_table(filename, col_vals)
# If the table is still not created, then we don't have enough data, just
# store the data and return
if not self.table_created: