Skip to content

Commit

Permalink
db(...).select().group_by_value(db.table.field), thanks Yair
Browse files Browse the repository at this point in the history
  • Loading branch information
Massimo Di Pierro authored and Massimo Di Pierro committed Feb 22, 2012
1 parent 620c61b commit cd20ee3
Show file tree
Hide file tree
Showing 3 changed files with 60 additions and 42 deletions.
2 changes: 1 addition & 1 deletion VERSION
Original file line number Diff line number Diff line change
@@ -1 +1 @@
Version 1.99.4 (2012-02-21 16:25:39) stable
Version 1.99.4 (2012-02-22 11:32:05) stable
4 changes: 2 additions & 2 deletions applications/examples/views/default/who.html
Original file line number Diff line number Diff line change
Expand Up @@ -120,9 +120,9 @@ <h3>
</li><li>Thadeus Burgess (validators)
</li><li>Tim Michelsen (Sphinx documentation)
</li><li>Timothy Farrell (python 2.6 compliance, windows support)
</li><li>Yair Eshel (internationalization)
</li><li>Yair Eshel (internationalization, DAL improvement)
</li><li>Yannis Aribaud (CAS compliance)
</li><li>Yarko Tymciurak (design, Sphinx documentation)
</li><li>Yarko Tymciurak (design)
</li><li>Younghyun Jo (internationalization)
</li><li>Vidul Nikolaev Petrov (captcha)
</li><li>Vinicius Assef
Expand Down
96 changes: 57 additions & 39 deletions gluon/dal.py
Original file line number Diff line number Diff line change
Expand Up @@ -930,7 +930,7 @@ def insert(self, table, fields):
self.execute(query)
except Exception, e:
if isinstance(e,self.integrity_error_class()):
return None
return None
raise e
if hasattr(table,'_primarykey'):
return dict([(k[0].name, k[1]) for k in fields \
Expand Down Expand Up @@ -1190,7 +1190,7 @@ def _select(self, query, fields, attributes):
for tablename in self.tables(field):
if not tablename in tablenames:
tablenames.append(tablename)

if use_common_filters(query):
query = self.common_filter(query,tablenames)

Expand Down Expand Up @@ -1470,15 +1470,15 @@ def parse_value(self, value, field_type):
try:
value = value.decode(self.db._db_codec)
except Exception:
pass
pass
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(field_type, SQLCustomType):
value = field_type.decoder(value)
if not isinstance(field_type, str) or value is None:
return value
elif field_type in ('string', 'text', 'password', 'upload'):
return value
return value
else:
key = regex_type.match(field_type).group(0)
return self.parsemap[key](value,field_type)
Expand Down Expand Up @@ -1522,7 +1522,7 @@ def parse_datetime(self, value, field_type):

def parse_blob(self, value, field_type):
    # blob columns are stored base64-encoded; decode back to the raw bytes
    encoded = str(value)
    return base64.b64decode(encoded)

def parse_decimal(self, value, field_type):
decimals = int(field_type[8:-1].split(',')[-1])
if self.dbengine == 'sqlite':
Expand All @@ -1536,7 +1536,7 @@ def parse_list_integers(self, value, field_type):
value = bar_decode_integer(value)
return value

def parse_list_references(self, value, field_type):
    # every backend except the Google datastore stores list:reference
    # as a bar-encoded string of integers, so decode that first
    if self.dbengine != 'google:datastore':
        value = bar_decode_integer(value)
    referenced = field_type[5:]  # strip the leading 'list:' prefix
    return [self.parse_reference(item, referenced) for item in value]
Expand Down Expand Up @@ -1568,7 +1568,7 @@ def build_parsemap(self):
'blob':self.parse_blob,
'decimal':self.parse_decimal,
'list:integer':self.parse_list_integers,
'list:reference':self.parse_list_references,
'list:reference':self.parse_list_references,
'list:string':self.parse_list_strings,
}

Expand Down Expand Up @@ -1974,10 +1974,10 @@ def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
elif library == "postgres:psycopg2":
self.driver = self.drivers.get('psycopg2')
elif library == "postgres:pg8000":
self.driver = self.drivers.get('pg8000')
self.driver = self.drivers.get('pg8000')
if not self.driver:
raise RuntimeError, "%s is not available" % library

self.__version__ = "%s %s" % (self.driver.__name__, self.driver.__version__)
def connect(msg=msg,driver_args=driver_args):
return self.driver.connect(msg,**driver_args)
Expand Down Expand Up @@ -2073,8 +2073,8 @@ class OracleAdapter(BaseAdapter):
'datetime': 'DATE',
'id': 'NUMBER PRIMARY KEY',
'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
'list:integer': 'CLOB',
'list:string': 'CLOB',
'list:reference': 'CLOB',
Expand Down Expand Up @@ -3911,7 +3911,7 @@ def represent(self, obj, fieldtype):
elif fieldtype == 'list:string' or fieldtype == 'list:integer' or fieldtype == 'list:reference':
return value #raise SyntaxError, "Not Supported"
return value

#Safe determines whether an asynchronous request is done or a synchronous action is done
#For safety, we use synchronous requests by default
def insert(self,table,fields,safe=None):
Expand All @@ -3920,14 +3920,14 @@ def insert(self,table,fields,safe=None):
ctable = self.connection[table._tablename]
values = dict((k.name,self.represent(v,table[k.name].type)) for k,v in fields)
ctable.insert(values,safe=safe)
return int(str(values['_id']), 16)
return int(str(values['_id']), 16)

def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None, isCapped=False):
    # MongoDB creates collections lazily on first insert, so an ordinary
    # table needs no explicit DDL here; capped collections are not
    # implemented by this adapter and are rejected up front.
    if isCapped:
        raise RuntimeError, "Not implemented"
    else:
        pass

def count(self,query,distinct=None,snapshot=True):
if distinct:
raise RuntimeError, "COUNT DISTINCT not supported"
Expand Down Expand Up @@ -3966,7 +3966,7 @@ def expand(self, expression, field_type=None):
raise SyntaxError, 'second argument must be of type bson.objectid.ObjectId or an objectid representable integer'
elif expression.second == 0:
expression.second = pymongo.objectid.ObjectId('000000000000000000000000')
return expression.op(expression.first, expression.second)
return expression.op(expression.first, expression.second)
if isinstance(expression, Field):
if expression.type=='id':
return "_id"
Expand All @@ -3988,16 +3988,16 @@ def expand(self, expression, field_type=None):
return ','.join(self.represent(item,field_type) for item in expression)
else:
return expression

def _select(self,query,fields,attributes):
from pymongo import son

for key in set(attributes.keys())-set(('limitby','orderby')):
raise SyntaxError, 'invalid select attribute: %s' % key

new_fields=[]
mongosort_list = []

# try an orderby attribute
orderby = attributes.get('orderby', False)
limitby = attributes.get('limitby', False)
Expand All @@ -4007,18 +4007,18 @@ def _select(self,query,fields,attributes):
if isinstance(orderby, (list, tuple)):
print "in xorify"
orderby = xorify(orderby)


# !!!! need to add 'random'
for f in self.expand(orderby).split(','):
if f.startswith('-'):
mongosort_list.append((f[1:],-1))
else:
mongosort_list.append((f,1))
print "mongosort_list = %s" % mongosort_list
print "mongosort_list = %s" % mongosort_list

if limitby:
# a tuple
# a tuple
limitby_skip,limitby_limit = limitby
else:
limitby_skip = 0
Expand All @@ -4029,7 +4029,7 @@ def _select(self,query,fields,attributes):

#if distinct:
#print "in distinct %s" % distinct

mongofields_dict = son.SON()
mongoqry_dict = {}
for item in fields:
Expand All @@ -4049,7 +4049,7 @@ def _select(self,query,fields,attributes):
for f in fieldnames:
mongofields_dict[f.name] = 1 # ie field=1
return tablename, mongoqry_dict, mongofields_dict, mongosort_list, limitby_limit, limitby_skip

# need to define all the 'sql' methods gt,lt etc....

def select(self,query,fields,attributes,count=False,snapshot=False):
Expand All @@ -4069,10 +4069,10 @@ def select(self,query,fields,attributes,count=False,snapshot=False):
return {'count' : ctable.find(mongoqry_dict,mongofields_dict,skip=limitby_skip, limit=limitby_limit, sort=mongosort_list,snapshot=snapshot).count()}
else:
mongo_list_dicts = ctable.find(mongoqry_dict,mongofields_dict,skip=limitby_skip, limit=limitby_limit, sort=mongosort_list,snapshot=snapshot) # pymongo cursor object
print "mongo_list_dicts=%s" % mongo_list_dicts
print "mongo_list_dicts=%s" % mongo_list_dicts
#if mongo_list_dicts.count() > 0: #
#colnames = mongo_list_dicts[0].keys() # assuming all docs have same "shape", grab colnames from first dictionary (aka row)
#else:
#else:
#colnames = mongofields_dict.keys()
#print "colnames = %s" % colnames
#rows = [row.values() for row in mongo_list_dicts]
Expand All @@ -4095,7 +4095,7 @@ def select(self,query,fields,attributes,count=False,snapshot=False):

def INVERT(self, first):
    # descending sort in the mongo query syntax: prefix the expanded
    # field expression with '-'
    expanded = self.expand(first)
    return '-%s' % expanded

def drop(self, table, mode=''):
ctable = self.connection[table._tablename]
Expand Down Expand Up @@ -4208,7 +4208,7 @@ def EQ(self,first,second):
#return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
result[self.expand(first)] = self.expand(second)
return result

def NE(self, first, second=None):
print "in NE"
result = {}
Expand Down Expand Up @@ -4345,7 +4345,7 @@ def EQ(self,first,second):
#return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
result[self.expand(first)] = self.expand(second)
return result

def NE(self, first, second=None):
print "in NE"
result = {}
Expand Down Expand Up @@ -4453,14 +4453,14 @@ class IMAPAdapter(NoSQLAdapter):
IMAP server mailbox list information.
Here is a list of supported fields:
Field Type Description
################################################################
uid string
uid string
answered boolean Flag
created date
created date
content list:string A list of text or html parts
to string
to string
cc string
bcc string
size integer the amount of octets of the message*
Expand All @@ -4470,7 +4470,7 @@ class IMAPAdapter(NoSQLAdapter):
sender string
recent boolean Flag
seen boolean Flag
subject string
subject string
mime string The mime header declaration
email string The complete RFC822 message**
attachments list:string Each non text decoded part as string
Expand Down Expand Up @@ -4498,7 +4498,7 @@ class IMAPAdapter(NoSQLAdapter):
# Count today's unseen messages
# smaller than 6000 octets from the
# inbox mailbox
q = imapdb.INBOX.seen == False
q &= imapdb.INBOX.created == datetime.date.today()
q &= imapdb.INBOX.size < 6000
Expand All @@ -4524,7 +4524,7 @@ class IMAPAdapter(NoSQLAdapter):
# It is also possible to mark messages for deletion instead of erasing them
# directly with set.update(deleted=True)
"""

types = {
Expand Down Expand Up @@ -6384,7 +6384,7 @@ def __init__(
if not field.name in fieldnames and not field.type=='id':
field = copy.copy(field)
# correct self references
if not table._actual and field.type == 'reference '+table._tablename:
if not table._actual and field.type == 'reference '+table._tablename:
field.type = 'reference '+self._tablename
newfields.append(field)
fieldnames.add(field.name)
Expand Down Expand Up @@ -7373,7 +7373,7 @@ def isempty(self):
def count(self,distinct=None):
    """Count the records selected by this Set (optionally DISTINCT)."""
    adapter = self.db._adapter
    return adapter.count(self.query, distinct)

def select(self, *fields, **attributes):
    """Execute this Set's query and return the selected rows.

    Field expressions are expanded against every table referenced by
    the query before being handed to the adapter.
    """
    adapter = self.db._adapter
    expanded = adapter.expand_all(fields, adapter.tables(self.query))
    return adapter.select(self.query, expanded, attributes)
Expand Down Expand Up @@ -7624,6 +7624,23 @@ def sort(self, f, reverse=False):
"""
return Rows(self.db,sorted(self,key=f,reverse=reverse),self.colnames)

def group_by_value(self, field):
    """
    regroups the rows, by one of the fields
    """
    if not self.records:
        return {}
    field_name = str(field)
    grouped = dict()
    for record in self:
        grouped.setdefault(record[field_name], []).append(record)
    return grouped

def as_list(self,
compact=True,
storage_to_dict=True,
Expand Down Expand Up @@ -8000,3 +8017,4 @@ def test_all():
import doctest
doctest.testmod()


0 comments on commit cd20ee3

Please sign in to comment.